Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +1 -0
- llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.11 +3 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/executor.py +239 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/extensions.py +584 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py +27 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/mean_.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/min_max_.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/shared.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/sum_.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/var_.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py +196 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py +125 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py +29 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py +244 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py +245 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__init__.py +15 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/base.py +121 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/categorical.py +87 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/generic.py +2852 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/groupby.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/grouper.py +1102 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/indexing.py +304 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/numba_.py +181 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/ops.py +1208 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__init__.py +28 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__pycache__/accessor.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__pycache__/base.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__pycache__/object_array.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/strings/accessor.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/strings/base.py +262 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/strings/object_array.py +497 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/util/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/util/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -190,3 +190,4 @@ llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_infer.so.
 llmeval-env/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.11 filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.11
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9203da41484a93c1da684567bf6d732687b9b1307e3098256cb69225d6f575e4
size 192713344
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (191 Bytes)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc
ADDED
Binary file (5.11 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc
ADDED
Binary file (15.9 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/executor.py
ADDED
@@ -0,0 +1,239 @@
from __future__ import annotations

import functools
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
)

if TYPE_CHECKING:
    from pandas._typing import Scalar

import numpy as np

from pandas.compat._optional import import_optional_dependency


@functools.cache
def generate_apply_looper(func, nopython=True, nogil=True, parallel=False):
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")
    nb_compat_func = numba.extending.register_jitable(func)

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def nb_looper(values, axis):
        # Operate on the first row/col in order to get
        # the output shape
        if axis == 0:
            first_elem = values[:, 0]
            dim0 = values.shape[1]
        else:
            first_elem = values[0]
            dim0 = values.shape[0]
        res0 = nb_compat_func(first_elem)
        # Use np.asarray to get shape for
        # https://github.com/numba/numba/issues/4202#issuecomment-1185981507
        buf_shape = (dim0,) + np.atleast_1d(np.asarray(res0)).shape
        if axis == 0:
            buf_shape = buf_shape[::-1]
        buff = np.empty(buf_shape)

        if axis == 1:
            buff[0] = res0
            for i in numba.prange(1, values.shape[0]):
                buff[i] = nb_compat_func(values[i])
        else:
            buff[:, 0] = res0
            for j in numba.prange(1, values.shape[1]):
                buff[:, j] = nb_compat_func(values[:, j])
        return buff

    return nb_looper


@functools.cache
def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel):
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    if is_grouped_kernel:

        @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def column_looper(
            values: np.ndarray,
            labels: np.ndarray,
            ngroups: int,
            min_periods: int,
            *args,
        ):
            result = np.empty((values.shape[0], ngroups), dtype=result_dtype)
            na_positions = {}
            for i in numba.prange(values.shape[0]):
                output, na_pos = func(
                    values[i], result_dtype, labels, ngroups, min_periods, *args
                )
                result[i] = output
                if len(na_pos) > 0:
                    na_positions[i] = np.array(na_pos)
            return result, na_positions

    else:

        @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def column_looper(
            values: np.ndarray,
            start: np.ndarray,
            end: np.ndarray,
            min_periods: int,
            *args,
        ):
            result = np.empty((values.shape[0], len(start)), dtype=result_dtype)
            na_positions = {}
            for i in numba.prange(values.shape[0]):
                output, na_pos = func(
                    values[i], result_dtype, start, end, min_periods, *args
                )
                result[i] = output
                if len(na_pos) > 0:
                    na_positions[i] = np.array(na_pos)
            return result, na_positions

    return column_looper


default_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.int64,
    np.dtype("int16"): np.int64,
    np.dtype("int32"): np.int64,
    np.dtype("int64"): np.int64,
    np.dtype("uint8"): np.uint64,
    np.dtype("uint16"): np.uint64,
    np.dtype("uint32"): np.uint64,
    np.dtype("uint64"): np.uint64,
    np.dtype("float32"): np.float64,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.complex128,
    np.dtype("complex128"): np.complex128,
}


# TODO: Preserve complex dtypes

float_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.float64,
    np.dtype("int16"): np.float64,
    np.dtype("int32"): np.float64,
    np.dtype("int64"): np.float64,
    np.dtype("uint8"): np.float64,
    np.dtype("uint16"): np.float64,
    np.dtype("uint32"): np.float64,
    np.dtype("uint64"): np.float64,
    np.dtype("float32"): np.float64,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.float64,
    np.dtype("complex128"): np.float64,
}

identity_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.int8,
    np.dtype("int16"): np.int16,
    np.dtype("int32"): np.int32,
    np.dtype("int64"): np.int64,
    np.dtype("uint8"): np.uint8,
    np.dtype("uint16"): np.uint16,
    np.dtype("uint32"): np.uint32,
    np.dtype("uint64"): np.uint64,
    np.dtype("float32"): np.float32,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.complex64,
    np.dtype("complex128"): np.complex128,
}


def generate_shared_aggregator(
    func: Callable[..., Scalar],
    dtype_mapping: dict[np.dtype, np.dtype],
    is_grouped_kernel: bool,
    nopython: bool,
    nogil: bool,
    parallel: bool,
):
    """
    Generate a Numba function that loops over the columns 2D object and applies
    a 1D numba kernel over each column.

    Parameters
    ----------
    func : function
        aggregation function to be applied to each column
    dtype_mapping: dict or None
        If not None, maps a dtype to a result dtype.
        Otherwise, will fall back to default mapping.
    is_grouped_kernel: bool, default False
        Whether func operates using the group labels (True)
        or using starts/ends arrays

        If true, you also need to pass the number of groups to this function
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """

    # A wrapper around the looper function,
    # to dispatch based on dtype since numba is unable to do that in nopython mode

    # It also post-processes the values by inserting nans where number of observations
    # is less than min_periods
    # Cannot do this in numba nopython mode
    # (you'll run into type-unification error when you cast int -> float)
    def looper_wrapper(
        values,
        start=None,
        end=None,
        labels=None,
        ngroups=None,
        min_periods: int = 0,
        **kwargs,
    ):
        result_dtype = dtype_mapping[values.dtype]
        column_looper = make_looper(
            func, result_dtype, is_grouped_kernel, nopython, nogil, parallel
        )
        # Need to unpack kwargs since numba only supports *args
        if is_grouped_kernel:
            result, na_positions = column_looper(
                values, labels, ngroups, min_periods, *kwargs.values()
            )
        else:
            result, na_positions = column_looper(
                values, start, end, min_periods, *kwargs.values()
            )
        if result.dtype.kind == "i":
            # Look if na_positions is not empty
            # If so, convert the whole block
            # This is OK since int dtype cannot hold nan,
            # so if min_periods not satisfied for 1 col, it is not satisfied for
            # all columns at that index
            for na_pos in na_positions.values():
                if len(na_pos) > 0:
                    result = result.astype("float64")
                    break
        # TODO: Optimize this
        for i, na_pos in na_positions.items():
            if len(na_pos) > 0:
                result[i, na_pos] = np.nan
        return result

    return looper_wrapper
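
Note (illustrative, not part of the upload): generate_shared_aggregator above dispatches on dtype, compiles a per-column looper, and back-fills NA positions outside nopython mode. A minimal usage sketch against the sliding_mean kernel added further down, assuming a pandas 2.x layout with numba installed; the arrays and names here are invented for the example:

# Illustrative sketch only -- exercises the private pandas internals in this diff.
import numpy as np

from pandas.core._numba.executor import (
    float_dtype_mapping,
    generate_shared_aggregator,
)
from pandas.core._numba.kernels import sliding_mean

values = np.arange(12.0).reshape(2, 6)  # 2 "columns", 6 observations each
start = np.arange(6, dtype=np.int64)    # per-window start offsets
end = np.minimum(start + 3, 6)          # per-window end offsets (width <= 3)

mean_agg = generate_shared_aggregator(
    sliding_mean,
    float_dtype_mapping,
    is_grouped_kernel=False,
    nopython=True,
    nogil=True,
    parallel=False,
)
# looper_wrapper unpacks kwargs and runs the jitted column looper per row
result = mean_agg(values, start=start, end=end, min_periods=1)
print(result.shape)  # (2, 6): one rolling mean per column per window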
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/extensions.py
ADDED
@@ -0,0 +1,584 @@
# Disable type checking for this module since numba's internals
# are not typed, and we use numba's internals via its extension API
# mypy: ignore-errors
"""
Utility classes/functions to let numba recognize
pandas Index/Series/DataFrame

Mostly vendored from https://github.com/numba/numba/blob/main/numba/tests/pdlike_usecase.py
"""

from __future__ import annotations

from contextlib import contextmanager
import operator

import numba
from numba import types
from numba.core import cgutils
from numba.core.datamodel import models
from numba.core.extending import (
    NativeValue,
    box,
    lower_builtin,
    make_attribute_wrapper,
    overload,
    overload_attribute,
    overload_method,
    register_model,
    type_callable,
    typeof_impl,
    unbox,
)
from numba.core.imputils import impl_ret_borrowed
import numpy as np

from pandas._libs import lib

from pandas.core.indexes.base import Index
from pandas.core.indexing import _iLocIndexer
from pandas.core.internals import SingleBlockManager
from pandas.core.series import Series


# Helper function to hack around fact that Index casts numpy string dtype to object
#
# Idea is to set an attribute on a Index called _numba_data
# that is the original data, or the object data casted to numpy string dtype,
# with a context manager that is unset afterwards
@contextmanager
def set_numba_data(index: Index):
    numba_data = index._data
    if numba_data.dtype == object:
        if not lib.is_string_array(numba_data):
            raise ValueError(
                "The numba engine only supports using string or numeric column names"
            )
        numba_data = numba_data.astype("U")
    try:
        index._numba_data = numba_data
        yield index
    finally:
        del index._numba_data


# TODO: Range index support
# (this currently lowers OK, but does not round-trip)
class IndexType(types.Type):
    """
    The type class for Index objects.
    """

    def __init__(self, dtype, layout, pyclass: any) -> None:
        self.pyclass = pyclass
        name = f"index({dtype}, {layout})"
        self.dtype = dtype
        self.layout = layout
        super().__init__(name)

    @property
    def key(self):
        return self.pyclass, self.dtype, self.layout

    @property
    def as_array(self):
        return types.Array(self.dtype, 1, self.layout)

    def copy(self, dtype=None, ndim: int = 1, layout=None):
        assert ndim == 1
        if dtype is None:
            dtype = self.dtype
        layout = layout or self.layout
        return type(self)(dtype, layout, self.pyclass)


class SeriesType(types.Type):
    """
    The type class for Series objects.
    """

    def __init__(self, dtype, index, namety) -> None:
        assert isinstance(index, IndexType)
        self.dtype = dtype
        self.index = index
        self.values = types.Array(self.dtype, 1, "C")
        self.namety = namety
        name = f"series({dtype}, {index}, {namety})"
        super().__init__(name)

    @property
    def key(self):
        return self.dtype, self.index, self.namety

    @property
    def as_array(self):
        return self.values

    def copy(self, dtype=None, ndim: int = 1, layout: str = "C"):
        assert ndim == 1
        assert layout == "C"
        if dtype is None:
            dtype = self.dtype
        return type(self)(dtype, self.index, self.namety)


@typeof_impl.register(Index)
def typeof_index(val, c):
    """
    This will assume that only strings are in object dtype
    index.
    (you should check this before this gets lowered down to numba)
    """
    # arrty = typeof_impl(val._data, c)
    arrty = typeof_impl(val._numba_data, c)
    assert arrty.ndim == 1
    return IndexType(arrty.dtype, arrty.layout, type(val))


@typeof_impl.register(Series)
def typeof_series(val, c):
    index = typeof_impl(val.index, c)
    arrty = typeof_impl(val.values, c)
    namety = typeof_impl(val.name, c)
    assert arrty.ndim == 1
    assert arrty.layout == "C"
    return SeriesType(arrty.dtype, index, namety)


@type_callable(Series)
def type_series_constructor(context):
    def typer(data, index, name=None):
        if isinstance(index, IndexType) and isinstance(data, types.Array):
            assert data.ndim == 1
            if name is None:
                name = types.intp
            return SeriesType(data.dtype, index, name)

    return typer


@type_callable(Index)
def type_index_constructor(context):
    def typer(data, hashmap=None):
        if isinstance(data, types.Array):
            assert data.layout == "C"
            assert data.ndim == 1
            assert hashmap is None or isinstance(hashmap, types.DictType)
            return IndexType(data.dtype, layout=data.layout, pyclass=Index)

    return typer


# Backend extensions for Index and Series and Frame
@register_model(IndexType)
class IndexModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        # We don't want the numpy string scalar type in our hashmap
        members = [
            ("data", fe_type.as_array),
            # This is an attempt to emulate our hashtable code with a numba
            # typed dict
            # It maps from values in the index to their integer positions in the array
            ("hashmap", types.DictType(fe_type.dtype, types.intp)),
            # Pointer to the Index object this was created from, or that it
            # boxes to
            # https://numba.discourse.group/t/qst-how-to-cache-the-boxing-of-an-object/2128/2?u=lithomas1
            ("parent", types.pyobject),
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)


@register_model(SeriesType)
class SeriesModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        members = [
            ("index", fe_type.index),
            ("values", fe_type.as_array),
            ("name", fe_type.namety),
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)


make_attribute_wrapper(IndexType, "data", "_data")
make_attribute_wrapper(IndexType, "hashmap", "hashmap")

make_attribute_wrapper(SeriesType, "index", "index")
make_attribute_wrapper(SeriesType, "values", "values")
make_attribute_wrapper(SeriesType, "name", "name")


@lower_builtin(Series, types.Array, IndexType)
def pdseries_constructor(context, builder, sig, args):
    data, index = args
    series = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    series.index = index
    series.values = data
    series.name = context.get_constant(types.intp, 0)
    return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue())


@lower_builtin(Series, types.Array, IndexType, types.intp)
@lower_builtin(Series, types.Array, IndexType, types.float64)
@lower_builtin(Series, types.Array, IndexType, types.unicode_type)
def pdseries_constructor_with_name(context, builder, sig, args):
    data, index, name = args
    series = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    series.index = index
    series.values = data
    series.name = name
    return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue())


@lower_builtin(Index, types.Array, types.DictType, types.pyobject)
def index_constructor_2arg(context, builder, sig, args):
    (data, hashmap, parent) = args
    index = cgutils.create_struct_proxy(sig.return_type)(context, builder)

    index.data = data
    index.hashmap = hashmap
    index.parent = parent
    return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue())


@lower_builtin(Index, types.Array, types.DictType)
def index_constructor_2arg_parent(context, builder, sig, args):
    # Basically same as index_constructor_1arg, but also lets you specify the
    # parent object
    (data, hashmap) = args
    index = cgutils.create_struct_proxy(sig.return_type)(context, builder)

    index.data = data
    index.hashmap = hashmap
    return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue())


@lower_builtin(Index, types.Array)
def index_constructor_1arg(context, builder, sig, args):
    from numba.typed import Dict

    key_type = sig.return_type.dtype
    value_type = types.intp

    def index_impl(data):
        return Index(data, Dict.empty(key_type, value_type))

    return context.compile_internal(builder, index_impl, sig, args)


# Helper to convert the unicodecharseq (numpy string scalar) into a unicode_type
# (regular string)
def maybe_cast_str(x):
    # Dummy function that numba can overload
    pass


@overload(maybe_cast_str)
def maybe_cast_str_impl(x):
    """Converts numba UnicodeCharSeq (numpy string scalar) -> unicode type (string).
    Is a no-op for other types."""
    if isinstance(x, types.UnicodeCharSeq):
        return lambda x: str(x)
    else:
        return lambda x: x


@unbox(IndexType)
def unbox_index(typ, obj, c):
    """
    Convert a Index object to a native structure.

    Note: Object dtype is not allowed here
    """
    data_obj = c.pyapi.object_getattr_string(obj, "_numba_data")
    index = cgutils.create_struct_proxy(typ)(c.context, c.builder)
    # If we see an object array, assume its been validated as only containing strings
    # We still need to do the conversion though
    index.data = c.unbox(typ.as_array, data_obj).value
    typed_dict_obj = c.pyapi.unserialize(c.pyapi.serialize_object(numba.typed.Dict))
    # Create an empty typed dict in numba for the hashmap for indexing
    # equiv of numba.typed.Dict.empty(typ.dtype, types.intp)
    arr_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.dtype))
    intp_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(types.intp))
    hashmap_obj = c.pyapi.call_method(
        typed_dict_obj, "empty", (arr_type_obj, intp_type_obj)
    )
    index.hashmap = c.unbox(types.DictType(typ.dtype, types.intp), hashmap_obj).value
    # Set the parent for speedy boxing.
    index.parent = obj

    # Decrefs
    c.pyapi.decref(data_obj)
    c.pyapi.decref(arr_type_obj)
    c.pyapi.decref(intp_type_obj)
    c.pyapi.decref(typed_dict_obj)

    return NativeValue(index._getvalue())


@unbox(SeriesType)
def unbox_series(typ, obj, c):
    """
    Convert a Series object to a native structure.
    """
    index_obj = c.pyapi.object_getattr_string(obj, "index")
    values_obj = c.pyapi.object_getattr_string(obj, "values")
    name_obj = c.pyapi.object_getattr_string(obj, "name")

    series = cgutils.create_struct_proxy(typ)(c.context, c.builder)
    series.index = c.unbox(typ.index, index_obj).value
    series.values = c.unbox(typ.values, values_obj).value
    series.name = c.unbox(typ.namety, name_obj).value

    # Decrefs
    c.pyapi.decref(index_obj)
    c.pyapi.decref(values_obj)
    c.pyapi.decref(name_obj)

    return NativeValue(series._getvalue())


@box(IndexType)
def box_index(typ, val, c):
    """
    Convert a native index structure to a Index object.

    If our native index is of a numpy string dtype, we'll cast it to
    object.
    """
    # First build a Numpy array object, then wrap it in a Index
    index = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)

    res = cgutils.alloca_once_value(c.builder, index.parent)

    # Does parent exist?
    # (it means already boxed once, or Index same as original df.index or df.columns)
    # xref https://github.com/numba/numba/blob/596e8a55334cc46854e3192766e643767bd7c934/numba/core/boxing.py#L593C17-L593C17
    with c.builder.if_else(cgutils.is_not_null(c.builder, index.parent)) as (
        has_parent,
        otherwise,
    ):
        with has_parent:
            c.pyapi.incref(index.parent)
        with otherwise:
            # TODO: preserve the original class for the index
            # Also need preserve the name of the Index
            # class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.pyclass))
            class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Index))
            array_obj = c.box(typ.as_array, index.data)
            if isinstance(typ.dtype, types.UnicodeCharSeq):
                # We converted to numpy string dtype, convert back
                # to object since _simple_new won't do that for uss
                object_str_obj = c.pyapi.unserialize(c.pyapi.serialize_object("object"))
                array_obj = c.pyapi.call_method(array_obj, "astype", (object_str_obj,))
                c.pyapi.decref(object_str_obj)
            # this is basically Index._simple_new(array_obj, name_obj) in python
            index_obj = c.pyapi.call_method(class_obj, "_simple_new", (array_obj,))
            index.parent = index_obj
            c.builder.store(index_obj, res)

            # Decrefs
            c.pyapi.decref(class_obj)
            c.pyapi.decref(array_obj)
    return c.builder.load(res)


@box(SeriesType)
def box_series(typ, val, c):
    """
    Convert a native series structure to a Series object.
    """
    series = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)
    series_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Series._from_mgr))
    mgr_const_obj = c.pyapi.unserialize(
        c.pyapi.serialize_object(SingleBlockManager.from_array)
    )
    index_obj = c.box(typ.index, series.index)
    array_obj = c.box(typ.as_array, series.values)
    name_obj = c.box(typ.namety, series.name)
    # This is basically equivalent of
    # pd.Series(data=array_obj, index=index_obj)
    # To improve perf, we will construct the Series from a manager
    # object to avoid checks.
    # We'll also set the name attribute manually to avoid validation
    mgr_obj = c.pyapi.call_function_objargs(
        mgr_const_obj,
        (
            array_obj,
            index_obj,
        ),
    )
    mgr_axes_obj = c.pyapi.object_getattr_string(mgr_obj, "axes")
    # Series._constructor_from_mgr(mgr, axes)
    series_obj = c.pyapi.call_function_objargs(
        series_const_obj, (mgr_obj, mgr_axes_obj)
    )
    c.pyapi.object_setattr_string(series_obj, "_name", name_obj)

    # Decrefs
    c.pyapi.decref(series_const_obj)
    c.pyapi.decref(mgr_axes_obj)
    c.pyapi.decref(mgr_obj)
    c.pyapi.decref(mgr_const_obj)
    c.pyapi.decref(index_obj)
    c.pyapi.decref(array_obj)
    c.pyapi.decref(name_obj)

    return series_obj


# Add common series reductions (e.g. mean, sum),
# and also add common binops (e.g. add, sub, mul, div)
def generate_series_reduction(ser_reduction, ser_method):
    @overload_method(SeriesType, ser_reduction)
    def series_reduction(series):
        def series_reduction_impl(series):
            return ser_method(series.values)

        return series_reduction_impl

    return series_reduction


def generate_series_binop(binop):
    @overload(binop)
    def series_binop(series1, value):
        if isinstance(series1, SeriesType):
            if isinstance(value, SeriesType):

                def series_binop_impl(series1, series2):
                    # TODO: Check index matching?
                    return Series(
                        binop(series1.values, series2.values),
                        series1.index,
                        series1.name,
                    )

                return series_binop_impl
            else:

                def series_binop_impl(series1, value):
                    return Series(
                        binop(series1.values, value), series1.index, series1.name
                    )

                return series_binop_impl

    return series_binop


series_reductions = [
    ("sum", np.sum),
    ("mean", np.mean),
    # Disabled due to discrepancies between numba std. dev
    # and pandas std. dev (no way to specify dof)
    # ("std", np.std),
    # ("var", np.var),
    ("min", np.min),
    ("max", np.max),
]
for reduction, reduction_method in series_reductions:
    generate_series_reduction(reduction, reduction_method)

series_binops = [operator.add, operator.sub, operator.mul, operator.truediv]

for ser_binop in series_binops:
    generate_series_binop(ser_binop)


# get_loc on Index
@overload_method(IndexType, "get_loc")
def index_get_loc(index, item):
    def index_get_loc_impl(index, item):
        # Initialize the hash table if not initialized
        if len(index.hashmap) == 0:
            for i, val in enumerate(index._data):
                index.hashmap[val] = i
        return index.hashmap[item]

    return index_get_loc_impl


# Indexing for Series/Index
@overload(operator.getitem)
def series_indexing(series, item):
    if isinstance(series, SeriesType):

        def series_getitem(series, item):
            loc = series.index.get_loc(item)
            return series.iloc[loc]

        return series_getitem


@overload(operator.getitem)
def index_indexing(index, idx):
    if isinstance(index, IndexType):

        def index_getitem(index, idx):
            return index._data[idx]

        return index_getitem


class IlocType(types.Type):
    def __init__(self, obj_type) -> None:
        self.obj_type = obj_type
        name = f"iLocIndexer({obj_type})"
        super().__init__(name=name)

    @property
    def key(self):
        return self.obj_type


@typeof_impl.register(_iLocIndexer)
def typeof_iloc(val, c):
    objtype = typeof_impl(val.obj, c)
    return IlocType(objtype)


@type_callable(_iLocIndexer)
def type_iloc_constructor(context):
    def typer(obj):
        if isinstance(obj, SeriesType):
            return IlocType(obj)

    return typer


@lower_builtin(_iLocIndexer, SeriesType)
def iloc_constructor(context, builder, sig, args):
    (obj,) = args
    iloc_indexer = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    iloc_indexer.obj = obj
    return impl_ret_borrowed(
        context, builder, sig.return_type, iloc_indexer._getvalue()
    )


@register_model(IlocType)
class ILocModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        members = [("obj", fe_type.obj_type)]
        models.StructModel.__init__(self, dmm, fe_type, members)


make_attribute_wrapper(IlocType, "obj", "obj")


@overload_attribute(SeriesType, "iloc")
def series_iloc(series):
    def get(series):
        return _iLocIndexer(series)

    return get


@overload(operator.getitem)
def iloc_getitem(iloc_indexer, i):
    if isinstance(iloc_indexer, IlocType):

        def getitem_impl(iloc_indexer, i):
            return iloc_indexer.obj.values[i]

        return getitem_impl
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py
ADDED
@@ -0,0 +1,27 @@
from pandas.core._numba.kernels.mean_ import (
    grouped_mean,
    sliding_mean,
)
from pandas.core._numba.kernels.min_max_ import (
    grouped_min_max,
    sliding_min_max,
)
from pandas.core._numba.kernels.sum_ import (
    grouped_sum,
    sliding_sum,
)
from pandas.core._numba.kernels.var_ import (
    grouped_var,
    sliding_var,
)

__all__ = [
    "sliding_mean",
    "grouped_mean",
    "sliding_sum",
    "grouped_sum",
    "sliding_var",
    "grouped_var",
    "sliding_min_max",
    "grouped_min_max",
]
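
Note (illustrative, not part of the upload): the exported kernels can also be invoked directly on a flat values array. A small sketch with grouped_mean, assuming numba types the np.dtype argument the same way pandas passes it internally:

# Illustrative sketch only.
import numpy as np

from pandas.core._numba.kernels import grouped_mean

values = np.array([1.0, 2.0, np.nan, 4.0])
labels = np.array([0, 0, 1, 1], dtype=np.intp)  # group label per observation

out, na_pos = grouped_mean(values, np.dtype("float64"), labels, 2, 1)
print(out)     # [1.5 4. ] -- the NaN is skipped, not propagated
print(na_pos)  # [] -- float64 output can hold NaN directly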
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (631 Bytes)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/mean_.cpython-310.pyc
ADDED
Binary file (3.49 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/min_max_.cpython-310.pyc
ADDED
Binary file (2.46 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/shared.cpython-310.pyc
ADDED
Binary file (790 Bytes)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/sum_.cpython-310.pyc
ADDED
Binary file (3.82 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/var_.cpython-310.pyc
ADDED
Binary file (3.96 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py
ADDED
@@ -0,0 +1,196 @@
"""
Numba 1D mean kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numba
import numpy as np

from pandas.core._numba.kernels.shared import is_monotonic_increasing
from pandas.core._numba.kernels.sum_ import grouped_kahan_sum

if TYPE_CHECKING:
    from pandas._typing import npt


@numba.jit(nopython=True, nogil=True, parallel=False)
def add_mean(
    val: float,
    nobs: int,
    sum_x: float,
    neg_ct: int,
    compensation: float,
    num_consecutive_same_value: int,
    prev_value: float,
) -> tuple[int, float, int, float, int, float]:
    if not np.isnan(val):
        nobs += 1
        y = val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
        if val < 0:
            neg_ct += 1

        if val == prev_value:
            num_consecutive_same_value += 1
        else:
            num_consecutive_same_value = 1
        prev_value = val

    return nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value


@numba.jit(nopython=True, nogil=True, parallel=False)
def remove_mean(
    val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float
) -> tuple[int, float, int, float]:
    if not np.isnan(val):
        nobs -= 1
        y = -val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
        if val < 0:
            neg_ct -= 1
    return nobs, sum_x, neg_ct, compensation


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_mean(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    N = len(start)
    nobs = 0
    sum_x = 0.0
    neg_ct = 0
    compensation_add = 0.0
    compensation_remove = 0.0

    is_monotonic_increasing_bounds = is_monotonic_increasing(
        start
    ) and is_monotonic_increasing(end)

    output = np.empty(N, dtype=result_dtype)

    for i in range(N):
        s = start[i]
        e = end[i]
        if i == 0 or not is_monotonic_increasing_bounds:
            prev_value = values[s]
            num_consecutive_same_value = 0

            for j in range(s, e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_mean(
                    val,
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                )
        else:
            for j in range(start[i - 1], s):
                val = values[j]
                nobs, sum_x, neg_ct, compensation_remove = remove_mean(
                    val, nobs, sum_x, neg_ct, compensation_remove
                )

            for j in range(end[i - 1], e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_mean(
                    val,
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                )

        if nobs >= min_periods and nobs > 0:
            result = sum_x / nobs
            if num_consecutive_same_value >= nobs:
                result = prev_value
            elif neg_ct == 0 and result < 0:
                result = 0
            elif neg_ct == nobs and result > 0:
                result = 0
        else:
            result = np.nan

        output[i] = result

        if not is_monotonic_increasing_bounds:
            nobs = 0
            sum_x = 0.0
            neg_ct = 0
            compensation_remove = 0.0

    # na_position is empty list since float64 can already hold nans
    # Do list comprehension, since numba cannot figure out that na_pos is
    # empty list of ints on its own
    na_pos = [0 for i in range(0)]
    return output, na_pos


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_mean(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(
        values, result_dtype, labels, ngroups
    )

    # Post-processing, replace sums that don't satisfy min_periods
    for lab in range(ngroups):
        nobs = nobs_arr[lab]
        num_consecutive_same_value = consecutive_counts[lab]
        prev_value = prev_vals[lab]
        sum_x = output[lab]
        if nobs >= min_periods:
            if num_consecutive_same_value >= nobs:
                result = prev_value * nobs
            else:
                result = sum_x
        else:
            result = np.nan
        result /= nobs
        output[lab] = result

    # na_position is empty list since float64 can already hold nans
    # Do list comprehension, since numba cannot figure out that na_pos is
    # empty list of ints on its own
    na_pos = [0 for i in range(0)]
    return output, na_pos
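
Note (illustrative, not part of the upload): the paired start/end arrays describe one window per output slot, which is how the same kernel backs rolling, expanding, and windowed-groupby paths. A small sketch with invented window bounds:

# Illustrative sketch only.
import numpy as np

from pandas.core._numba.kernels.mean_ import sliding_mean

values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
start = np.array([0, 0, 0, 1, 2], dtype=np.int64)  # expanding, then width-3 sliding
end = np.array([1, 2, 3, 4, 5], dtype=np.int64)

out, na_pos = sliding_mean(values, np.dtype("float64"), start, end, 1)
print(out)  # [1.  1.5 2.  3.  4. ]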
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py
ADDED
@@ -0,0 +1,125 @@
"""
Numba 1D min/max kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numba
import numpy as np

if TYPE_CHECKING:
    from pandas._typing import npt


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_min_max(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
    is_max: bool,
) -> tuple[np.ndarray, list[int]]:
    N = len(start)
    nobs = 0
    output = np.empty(N, dtype=result_dtype)
    na_pos = []
    # Use deque once numba supports it
    # https://github.com/numba/numba/issues/7417
    Q: list = []
    W: list = []
    for i in range(N):
        curr_win_size = end[i] - start[i]
        if i == 0:
            st = start[i]
        else:
            st = end[i - 1]

        for k in range(st, end[i]):
            ai = values[k]
            if not np.isnan(ai):
                nobs += 1
            elif is_max:
                ai = -np.inf
            else:
                ai = np.inf
            # Discard previous entries if we find new min or max
            if is_max:
                while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            else:
                while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            Q.append(k)
            W.append(k)

        # Discard entries outside and left of current window
        while Q and Q[0] <= start[i] - 1:
            Q.pop(0)
        while W and W[0] <= start[i] - 1:
            if not np.isnan(values[W[0]]):
                nobs -= 1
            W.pop(0)

        # Save output based on index in input value array
        if Q and curr_win_size > 0 and nobs >= min_periods:
            output[i] = values[Q[0]]
        else:
            if values.dtype.kind != "i":
                output[i] = np.nan
            else:
                na_pos.append(i)

    return output, na_pos


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_min_max(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
    is_max: bool,
) -> tuple[np.ndarray, list[int]]:
    N = len(labels)
    nobs = np.zeros(ngroups, dtype=np.int64)
    na_pos = []
    output = np.empty(ngroups, dtype=result_dtype)

    for i in range(N):
        lab = labels[i]
        val = values[i]
        if lab < 0:
            continue

        if values.dtype.kind == "i" or not np.isnan(val):
            nobs[lab] += 1
        else:
            # NaN value cannot be a min/max value
            continue

        if nobs[lab] == 1:
            # First element in group, set output equal to this
            output[lab] = val
            continue

        if is_max:
            if val > output[lab]:
                output[lab] = val
        else:
            if val < output[lab]:
                output[lab] = val

    # Set labels that don't satisfy min_periods as np.nan
    for lab, count in enumerate(nobs):
        if count < min_periods:
            na_pos.append(lab)

    return output, na_pos
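
Note (illustrative, not part of the upload): Q above is a monotonic queue, so each element is pushed and popped at most once per pass, giving O(n) rolling min/max. A small rolling-max sketch with invented bounds:

# Illustrative sketch only.
import numpy as np

from pandas.core._numba.kernels.min_max_ import sliding_min_max

values = np.array([3.0, 1.0, 4.0, 1.0, 5.0])
start = np.array([0, 0, 1, 2, 3], dtype=np.int64)  # width-2 windows after warm-up
end = np.array([1, 2, 3, 4, 5], dtype=np.int64)

out, na_pos = sliding_min_max(values, np.dtype("float64"), start, end, 1, True)
print(out)  # rolling max: [3. 3. 4. 4. 5.]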
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py
ADDED
@@ -0,0 +1,29 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import numba

if TYPE_CHECKING:
    import numpy as np


@numba.jit(
    # error: Any? not callable
    numba.boolean(numba.int64[:]),  # type: ignore[misc]
    nopython=True,
    nogil=True,
    parallel=False,
)
def is_monotonic_increasing(bounds: np.ndarray) -> bool:
    """Check if int64 values are monotonically increasing."""
    n = len(bounds)
    if n < 2:
        return True
    prev = bounds[0]
    for i in range(1, n):
        cur = bounds[i]
        if cur < prev:
            return False
        prev = cur
    return True
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py
ADDED
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+"""
+Numba 1D sum kernels that can be shared by
+* Dataframe / Series
+* groupby
+* rolling / expanding
+
+Mirrors pandas/_libs/window/aggregation.pyx
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+)
+
+import numba
+from numba.extending import register_jitable
+import numpy as np
+
+if TYPE_CHECKING:
+    from pandas._typing import npt
+
+from pandas.core._numba.kernels.shared import is_monotonic_increasing
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def add_sum(
+    val: Any,
+    nobs: int,
+    sum_x: Any,
+    compensation: Any,
+    num_consecutive_same_value: int,
+    prev_value: Any,
+) -> tuple[int, Any, Any, int, Any]:
+    if not np.isnan(val):
+        nobs += 1
+        y = val - compensation
+        t = sum_x + y
+        compensation = t - sum_x - y
+        sum_x = t
+
+        if val == prev_value:
+            num_consecutive_same_value += 1
+        else:
+            num_consecutive_same_value = 1
+        prev_value = val
+
+    return nobs, sum_x, compensation, num_consecutive_same_value, prev_value
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def remove_sum(
+    val: Any, nobs: int, sum_x: Any, compensation: Any
+) -> tuple[int, Any, Any]:
+    if not np.isnan(val):
+        nobs -= 1
+        y = -val - compensation
+        t = sum_x + y
+        compensation = t - sum_x - y
+        sum_x = t
+    return nobs, sum_x, compensation
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def sliding_sum(
+    values: np.ndarray,
+    result_dtype: np.dtype,
+    start: np.ndarray,
+    end: np.ndarray,
+    min_periods: int,
+) -> tuple[np.ndarray, list[int]]:
+    dtype = values.dtype
+
+    na_val: object = np.nan
+    if dtype.kind == "i":
+        na_val = 0
+
+    N = len(start)
+    nobs = 0
+    sum_x = 0
+    compensation_add = 0
+    compensation_remove = 0
+    na_pos = []
+
+    is_monotonic_increasing_bounds = is_monotonic_increasing(
+        start
+    ) and is_monotonic_increasing(end)
+
+    output = np.empty(N, dtype=result_dtype)
+
+    for i in range(N):
+        s = start[i]
+        e = end[i]
+        if i == 0 or not is_monotonic_increasing_bounds:
+            prev_value = values[s]
+            num_consecutive_same_value = 0
+
+            for j in range(s, e):
+                val = values[j]
+                (
+                    nobs,
+                    sum_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                ) = add_sum(
+                    val,
+                    nobs,
+                    sum_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                )
+        else:
+            for j in range(start[i - 1], s):
+                val = values[j]
+                nobs, sum_x, compensation_remove = remove_sum(
+                    val, nobs, sum_x, compensation_remove
+                )
+
+            for j in range(end[i - 1], e):
+                val = values[j]
+                (
+                    nobs,
+                    sum_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                ) = add_sum(
+                    val,
+                    nobs,
+                    sum_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                )
+
+        if nobs == 0 == min_periods:
+            result: object = 0
+        elif nobs >= min_periods:
+            if num_consecutive_same_value >= nobs:
+                result = prev_value * nobs
+            else:
+                result = sum_x
+        else:
+            result = na_val
+            if dtype.kind == "i":
+                na_pos.append(i)
+
+        output[i] = result
+
+        if not is_monotonic_increasing_bounds:
+            nobs = 0
+            sum_x = 0
+            compensation_remove = 0
+
+    return output, na_pos
+
+
+# Mypy/pyright don't like the fact that the decorator is untyped
+@register_jitable  # type: ignore[misc]
+def grouped_kahan_sum(
+    values: np.ndarray,
+    result_dtype: np.dtype,
+    labels: npt.NDArray[np.intp],
+    ngroups: int,
+) -> tuple[
+    np.ndarray, npt.NDArray[np.int64], np.ndarray, npt.NDArray[np.int64], np.ndarray
+]:
+    N = len(labels)
+
+    nobs_arr = np.zeros(ngroups, dtype=np.int64)
+    comp_arr = np.zeros(ngroups, dtype=values.dtype)
+    consecutive_counts = np.zeros(ngroups, dtype=np.int64)
+    prev_vals = np.zeros(ngroups, dtype=values.dtype)
+    output = np.zeros(ngroups, dtype=result_dtype)
+
+    for i in range(N):
+        lab = labels[i]
+        val = values[i]
+
+        if lab < 0:
+            continue
+
+        sum_x = output[lab]
+        nobs = nobs_arr[lab]
+        compensation_add = comp_arr[lab]
+        num_consecutive_same_value = consecutive_counts[lab]
+        prev_value = prev_vals[lab]
+
+        (
+            nobs,
+            sum_x,
+            compensation_add,
+            num_consecutive_same_value,
+            prev_value,
+        ) = add_sum(
+            val,
+            nobs,
+            sum_x,
+            compensation_add,
+            num_consecutive_same_value,
+            prev_value,
+        )
+
+        output[lab] = sum_x
+        consecutive_counts[lab] = num_consecutive_same_value
+        prev_vals[lab] = prev_value
+        comp_arr[lab] = compensation_add
+        nobs_arr[lab] = nobs
+    return output, nobs_arr, comp_arr, consecutive_counts, prev_vals
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def grouped_sum(
+    values: np.ndarray,
+    result_dtype: np.dtype,
+    labels: npt.NDArray[np.intp],
+    ngroups: int,
+    min_periods: int,
+) -> tuple[np.ndarray, list[int]]:
+    na_pos = []
+
+    output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(
+        values, result_dtype, labels, ngroups
+    )
+
+    # Post-processing, replace sums that don't satisfy min_periods
+    for lab in range(ngroups):
+        nobs = nobs_arr[lab]
+        num_consecutive_same_value = consecutive_counts[lab]
+        prev_value = prev_vals[lab]
+        sum_x = output[lab]
+        if nobs >= min_periods:
+            if num_consecutive_same_value >= nobs:
+                result = prev_value * nobs
+            else:
+                result = sum_x
+        else:
+            result = sum_x  # Don't change val, will be replaced by nan later
+            na_pos.append(lab)
+        output[lab] = result
+
+    return output, na_pos
llmeval-env/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py
ADDED
@@ -0,0 +1,245 @@
+"""
+Numba 1D var kernels that can be shared by
+* Dataframe / Series
+* groupby
+* rolling / expanding
+
+Mirrors pandas/_libs/window/aggregation.pyx
+"""
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numba
+import numpy as np
+
+if TYPE_CHECKING:
+    from pandas._typing import npt
+
+from pandas.core._numba.kernels.shared import is_monotonic_increasing
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def add_var(
+    val: float,
+    nobs: int,
+    mean_x: float,
+    ssqdm_x: float,
+    compensation: float,
+    num_consecutive_same_value: int,
+    prev_value: float,
+) -> tuple[int, float, float, float, int, float]:
+    if not np.isnan(val):
+        if val == prev_value:
+            num_consecutive_same_value += 1
+        else:
+            num_consecutive_same_value = 1
+        prev_value = val
+
+        nobs += 1
+        prev_mean = mean_x - compensation
+        y = val - compensation
+        t = y - mean_x
+        compensation = t + mean_x - y
+        delta = t
+        if nobs:
+            mean_x += delta / nobs
+        else:
+            mean_x = 0
+        ssqdm_x += (val - prev_mean) * (val - mean_x)
+    return nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def remove_var(
+    val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float
+) -> tuple[int, float, float, float]:
+    if not np.isnan(val):
+        nobs -= 1
+        if nobs:
+            prev_mean = mean_x - compensation
+            y = val - compensation
+            t = y - mean_x
+            compensation = t + mean_x - y
+            delta = t
+            mean_x -= delta / nobs
+            ssqdm_x -= (val - prev_mean) * (val - mean_x)
+        else:
+            mean_x = 0
+            ssqdm_x = 0
+    return nobs, mean_x, ssqdm_x, compensation
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def sliding_var(
+    values: np.ndarray,
+    result_dtype: np.dtype,
+    start: np.ndarray,
+    end: np.ndarray,
+    min_periods: int,
+    ddof: int = 1,
+) -> tuple[np.ndarray, list[int]]:
+    N = len(start)
+    nobs = 0
+    mean_x = 0.0
+    ssqdm_x = 0.0
+    compensation_add = 0.0
+    compensation_remove = 0.0
+
+    min_periods = max(min_periods, 1)
+    is_monotonic_increasing_bounds = is_monotonic_increasing(
+        start
+    ) and is_monotonic_increasing(end)
+
+    output = np.empty(N, dtype=result_dtype)
+
+    for i in range(N):
+        s = start[i]
+        e = end[i]
+        if i == 0 or not is_monotonic_increasing_bounds:
+            prev_value = values[s]
+            num_consecutive_same_value = 0
+
+            for j in range(s, e):
+                val = values[j]
+                (
+                    nobs,
+                    mean_x,
+                    ssqdm_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                ) = add_var(
+                    val,
+                    nobs,
+                    mean_x,
+                    ssqdm_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                )
+        else:
+            for j in range(start[i - 1], s):
+                val = values[j]
+                nobs, mean_x, ssqdm_x, compensation_remove = remove_var(
+                    val, nobs, mean_x, ssqdm_x, compensation_remove
+                )
+
+            for j in range(end[i - 1], e):
+                val = values[j]
+                (
+                    nobs,
+                    mean_x,
+                    ssqdm_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                ) = add_var(
+                    val,
+                    nobs,
+                    mean_x,
+                    ssqdm_x,
+                    compensation_add,
+                    num_consecutive_same_value,
+                    prev_value,
+                )
+
+        if nobs >= min_periods and nobs > ddof:
+            if nobs == 1 or num_consecutive_same_value >= nobs:
+                result = 0.0
+            else:
+                result = ssqdm_x / (nobs - ddof)
+        else:
+            result = np.nan
+
+        output[i] = result
+
+        if not is_monotonic_increasing_bounds:
+            nobs = 0
+            mean_x = 0.0
+            ssqdm_x = 0.0
+            compensation_remove = 0.0
+
+    # na_position is empty list since float64 can already hold nans
+    # Do list comprehension, since numba cannot figure out that na_pos is
+    # empty list of ints on its own
+    na_pos = [0 for i in range(0)]
+    return output, na_pos
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def grouped_var(
+    values: np.ndarray,
+    result_dtype: np.dtype,
+    labels: npt.NDArray[np.intp],
+    ngroups: int,
+    min_periods: int,
+    ddof: int = 1,
+) -> tuple[np.ndarray, list[int]]:
+    N = len(labels)
+
+    nobs_arr = np.zeros(ngroups, dtype=np.int64)
+    comp_arr = np.zeros(ngroups, dtype=values.dtype)
+    consecutive_counts = np.zeros(ngroups, dtype=np.int64)
+    prev_vals = np.zeros(ngroups, dtype=values.dtype)
+    output = np.zeros(ngroups, dtype=result_dtype)
+    means = np.zeros(ngroups, dtype=result_dtype)
+
+    for i in range(N):
+        lab = labels[i]
+        val = values[i]
+
+        if lab < 0:
+            continue
+
+        mean_x = means[lab]
+        ssqdm_x = output[lab]
+        nobs = nobs_arr[lab]
+        compensation_add = comp_arr[lab]
+        num_consecutive_same_value = consecutive_counts[lab]
+        prev_value = prev_vals[lab]
+
+        (
+            nobs,
+            mean_x,
+            ssqdm_x,
+            compensation_add,
+            num_consecutive_same_value,
+            prev_value,
+        ) = add_var(
+            val,
+            nobs,
+            mean_x,
+            ssqdm_x,
+            compensation_add,
+            num_consecutive_same_value,
+            prev_value,
+        )
+
+        output[lab] = ssqdm_x
+        means[lab] = mean_x
+        consecutive_counts[lab] = num_consecutive_same_value
+        prev_vals[lab] = prev_value
+        comp_arr[lab] = compensation_add
+        nobs_arr[lab] = nobs
+
+    # Post-processing, replace vars that don't satisfy min_periods
+    for lab in range(ngroups):
+        nobs = nobs_arr[lab]
+        num_consecutive_same_value = consecutive_counts[lab]
+        ssqdm_x = output[lab]
+        if nobs >= min_periods and nobs > ddof:
+            if nobs == 1 or num_consecutive_same_value >= nobs:
+                result = 0.0
+            else:
+                result = ssqdm_x / (nobs - ddof)
+        else:
+            result = np.nan
+        output[lab] = result
+
+    # Second pass to get the std.dev
+    # na_position is empty list since float64 can already hold nans
+    # Do list comprehension, since numba cannot figure out that na_pos is
+    # empty list of ints on its own
+    na_pos = [0 for i in range(0)]
+    return output, na_pos
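add_var above is a Welford-style update: it maintains a running mean and a running sum of squared deviations (ssqdm_x) in one pass, with an extra Kahan compensation term. A minimal standalone sketch (not part of the diff, compensation omitted) showing the core recurrence matches a two-pass variance:

import numpy as np

def welford_add(val, nobs, mean_x, ssqdm_x):
    # one-pass update of count, mean, and sum of squared deviations
    nobs += 1
    delta = val - mean_x
    mean_x += delta / nobs
    ssqdm_x += delta * (val - mean_x)
    return nobs, mean_x, ssqdm_x

vals = np.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
nobs, mean_x, ssqdm_x = 0, 0.0, 0.0
for v in vals:
    nobs, mean_x, ssqdm_x = welford_add(v, nobs, mean_x, ssqdm_x)
ddof = 1
print(ssqdm_x / (nobs - ddof))   # ~4.571
print(np.var(vals, ddof=1))      # same value from the two-pass formula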
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc
ADDED
Binary file (18.5 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc
ADDED
Binary file (39.1 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc
ADDED
Binary file (42.1 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc
ADDED
Binary file (10.7 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc
ADDED
Binary file (3.23 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc
ADDED
Binary file (9.55 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc
ADDED
Binary file (19.5 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__init__.py
ADDED
@@ -0,0 +1,15 @@
+from pandas.core.groupby.generic import (
+    DataFrameGroupBy,
+    NamedAgg,
+    SeriesGroupBy,
+)
+from pandas.core.groupby.groupby import GroupBy
+from pandas.core.groupby.grouper import Grouper
+
+__all__ = [
+    "DataFrameGroupBy",
+    "NamedAgg",
+    "SeriesGroupBy",
+    "GroupBy",
+    "Grouper",
+]
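An illustrative sketch (not part of the diff): these re-exports are the same objects reachable through pandas' public API; note that the private pandas.core.groupby path can change between versions.

import pandas as pd
from pandas.core.groupby import DataFrameGroupBy

df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1, 2, 3]})
print(isinstance(df.groupby("key"), DataFrameGroupBy))        # True
print(df.groupby(pd.Grouper(key="key"))["x"].sum().tolist())  # [3, 3]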
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (467 Bytes)
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc
ADDED
Binary file (26.3 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc
ADDED
Binary file (4.52 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc
ADDED
Binary file (29 kB)
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/base.py
ADDED
@@ -0,0 +1,121 @@
+"""
+Provide basic components for groupby.
+"""
+from __future__ import annotations
+
+import dataclasses
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Hashable
+
+
+@dataclasses.dataclass(order=True, frozen=True)
+class OutputKey:
+    label: Hashable
+    position: int
+
+
+# special case to prevent duplicate plots when catching exceptions when
+# forwarding methods from NDFrames
+plotting_methods = frozenset(["plot", "hist"])
+
+# cythonized transformations or canned "agg+broadcast", which do not
+# require postprocessing of the result by transform.
+cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
+
+# List of aggregation/reduction functions.
+# These map each group to a single numeric value
+reduction_kernels = frozenset(
+    [
+        "all",
+        "any",
+        "corrwith",
+        "count",
+        "first",
+        "idxmax",
+        "idxmin",
+        "last",
+        "max",
+        "mean",
+        "median",
+        "min",
+        "nunique",
+        "prod",
+        # as long as `quantile`'s signature accepts only
+        # a single quantile value, it's a reduction.
+        # GH#27526 might change that.
+        "quantile",
+        "sem",
+        "size",
+        "skew",
+        "std",
+        "sum",
+        "var",
+    ]
+)
+
+# List of transformation functions.
+# a transformation is a function that, for each group,
+# produces a result that has the same shape as the group.
+
+
+transformation_kernels = frozenset(
+    [
+        "bfill",
+        "cumcount",
+        "cummax",
+        "cummin",
+        "cumprod",
+        "cumsum",
+        "diff",
+        "ffill",
+        "fillna",
+        "ngroup",
+        "pct_change",
+        "rank",
+        "shift",
+    ]
+)
+
+# these are all the public methods on Grouper which don't belong
+# in either of the above lists
+groupby_other_methods = frozenset(
+    [
+        "agg",
+        "aggregate",
+        "apply",
+        "boxplot",
+        # corr and cov return ngroups*ncolumns rows, so they
+        # are neither a transformation nor a reduction
+        "corr",
+        "cov",
+        "describe",
+        "dtypes",
+        "expanding",
+        "ewm",
+        "filter",
+        "get_group",
+        "groups",
+        "head",
+        "hist",
+        "indices",
+        "ndim",
+        "ngroups",
+        "nth",
+        "ohlc",
+        "pipe",
+        "plot",
+        "resample",
+        "rolling",
+        "tail",
+        "take",
+        "transform",
+        "sample",
+        "value_counts",
+    ]
+)
+# Valid values of `name` for `groupby.transform(name)`
+# NOTE: do NOT edit this directly. New additions should be inserted
+# into the appropriate list above.
+transform_kernel_allowlist = reduction_kernels | transformation_kernels
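An illustrative sketch (not part of the diff) of what the allowlist gates in practice: transformation kernels keep the group shape, reduction kernels are broadcast back, and string kernels outside the allowlist are expected to be rejected by transform.

import pandas as pd

ser = pd.Series([1, 2, 3, 4])
gb = ser.groupby([1, 1, 2, 2])
print(gb.transform("cumsum").tolist())  # transformation kernel: [1, 3, 3, 7]
print(gb.transform("sum").tolist())     # reduction kernel, broadcast: [3, 3, 7, 7]
try:
    gb.transform("describe")            # not in the allowlist
except ValueError as err:
    print(err)                          # expected: not a valid function name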
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/categorical.py
ADDED
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import numpy as np
+
+from pandas.core.algorithms import unique1d
+from pandas.core.arrays.categorical import (
+    Categorical,
+    CategoricalDtype,
+    recode_for_categories,
+)
+
+
+def recode_for_groupby(
+    c: Categorical, sort: bool, observed: bool
+) -> tuple[Categorical, Categorical | None]:
+    """
+    Code the categories to ensure we can groupby for categoricals.
+
+    If observed=True, we return a new Categorical with the observed
+    categories only.
+
+    If sort=False, return a copy of self, coded with categories as
+    returned by .unique(), followed by any categories not appearing in
+    the data. If sort=True, return self.
+
+    This method is needed solely to ensure the categorical index of the
+    GroupBy result has categories in the order of appearance in the data
+    (GH-8868).
+
+    Parameters
+    ----------
+    c : Categorical
+    sort : bool
+        The value of the sort parameter groupby was called with.
+    observed : bool
+        Account only for the observed values
+
+    Returns
+    -------
+    Categorical
+        If sort=False, the new categories are set to the order of
+        appearance in codes (unless ordered=True, in which case the
+        original order is preserved), followed by any unrepresented
+        categories in the original order.
+    Categorical or None
+        If we are observed, return the original categorical, otherwise None
+    """
+    # we only care about observed values
+    if observed:
+        # In cases with c.ordered, this is equivalent to
+        #  return c.remove_unused_categories(), c
+
+        unique_codes = unique1d(c.codes)
+
+        take_codes = unique_codes[unique_codes != -1]
+        if sort:
+            take_codes = np.sort(take_codes)
+
+        # we recode according to the uniques
+        categories = c.categories.take(take_codes)
+        codes = recode_for_categories(c.codes, c.categories, categories)
+
+        # return a new categorical that maps our new codes
+        # and categories
+        dtype = CategoricalDtype(categories, ordered=c.ordered)
+        return Categorical._simple_new(codes, dtype=dtype), c
+
+    # Already sorted according to c.categories; all is fine
+    if sort:
+        return c, None
+
+    # sort=False should order groups in as-encountered order (GH-8868)
+
+    # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories
+    all_codes = np.arange(c.categories.nunique())
+    # GH 38140: exclude nan from indexer for categories
+    unique_notnan_codes = unique1d(c.codes[c.codes != -1])
+    if sort:
+        unique_notnan_codes = np.sort(unique_notnan_codes)
+    if len(all_codes) > len(unique_notnan_codes):
+        # GH 13179: All categories need to be present, even if missing from the data
+        missing_codes = np.setdiff1d(all_codes, unique_notnan_codes, assume_unique=True)
+        take_codes = np.concatenate((unique_notnan_codes, missing_codes))
+    else:
+        take_codes = unique_notnan_codes
+
+    return Categorical(c, c.unique().categories.take(take_codes)), None
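An illustrative sketch (not part of the diff) of the observable effect of this recoding, exercised through the public groupby API rather than the private helper:

import pandas as pd

cat = pd.Categorical(["b", "a", "b"], categories=["a", "b", "c"])
ser = pd.Series([1, 2, 3])
print(ser.groupby(cat, sort=False, observed=False).sum().index.tolist())
# expected ['b', 'a', 'c']: as-encountered order first, unseen category last (GH-8868)
print(ser.groupby(cat, sort=True, observed=True).sum().index.tolist())
# expected ['a', 'b']: only observed categories, in sorted order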
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/generic.py
ADDED
@@ -0,0 +1,2852 @@
1 |
+
"""
|
2 |
+
Define the SeriesGroupBy and DataFrameGroupBy
|
3 |
+
classes that hold the groupby interfaces (and some implementations).
|
4 |
+
|
5 |
+
These are user facing as the result of the ``df.groupby(...)`` operations,
|
6 |
+
which here returns a DataFrameGroupBy object.
|
7 |
+
"""
|
8 |
+
from __future__ import annotations
|
9 |
+
|
10 |
+
from collections import abc
|
11 |
+
from functools import partial
|
12 |
+
from textwrap import dedent
|
13 |
+
from typing import (
|
14 |
+
TYPE_CHECKING,
|
15 |
+
Any,
|
16 |
+
Callable,
|
17 |
+
Literal,
|
18 |
+
NamedTuple,
|
19 |
+
TypeVar,
|
20 |
+
Union,
|
21 |
+
cast,
|
22 |
+
)
|
23 |
+
import warnings
|
24 |
+
|
25 |
+
import numpy as np
|
26 |
+
|
27 |
+
from pandas._libs import (
|
28 |
+
Interval,
|
29 |
+
lib,
|
30 |
+
)
|
31 |
+
from pandas._libs.hashtable import duplicated
|
32 |
+
from pandas.errors import SpecificationError
|
33 |
+
from pandas.util._decorators import (
|
34 |
+
Appender,
|
35 |
+
Substitution,
|
36 |
+
doc,
|
37 |
+
)
|
38 |
+
from pandas.util._exceptions import find_stack_level
|
39 |
+
|
40 |
+
from pandas.core.dtypes.common import (
|
41 |
+
ensure_int64,
|
42 |
+
is_bool,
|
43 |
+
is_dict_like,
|
44 |
+
is_integer_dtype,
|
45 |
+
is_list_like,
|
46 |
+
is_numeric_dtype,
|
47 |
+
is_scalar,
|
48 |
+
)
|
49 |
+
from pandas.core.dtypes.dtypes import (
|
50 |
+
CategoricalDtype,
|
51 |
+
IntervalDtype,
|
52 |
+
)
|
53 |
+
from pandas.core.dtypes.inference import is_hashable
|
54 |
+
from pandas.core.dtypes.missing import (
|
55 |
+
isna,
|
56 |
+
notna,
|
57 |
+
)
|
58 |
+
|
59 |
+
from pandas.core import algorithms
|
60 |
+
from pandas.core.apply import (
|
61 |
+
GroupByApply,
|
62 |
+
maybe_mangle_lambdas,
|
63 |
+
reconstruct_func,
|
64 |
+
validate_func_kwargs,
|
65 |
+
warn_alias_replacement,
|
66 |
+
)
|
67 |
+
import pandas.core.common as com
|
68 |
+
from pandas.core.frame import DataFrame
|
69 |
+
from pandas.core.groupby import (
|
70 |
+
base,
|
71 |
+
ops,
|
72 |
+
)
|
73 |
+
from pandas.core.groupby.groupby import (
|
74 |
+
GroupBy,
|
75 |
+
GroupByPlot,
|
76 |
+
_agg_template_frame,
|
77 |
+
_agg_template_series,
|
78 |
+
_apply_docs,
|
79 |
+
_transform_template,
|
80 |
+
)
|
81 |
+
from pandas.core.indexes.api import (
|
82 |
+
Index,
|
83 |
+
MultiIndex,
|
84 |
+
all_indexes_same,
|
85 |
+
default_index,
|
86 |
+
)
|
87 |
+
from pandas.core.series import Series
|
88 |
+
from pandas.core.sorting import get_group_index
|
89 |
+
from pandas.core.util.numba_ import maybe_use_numba
|
90 |
+
|
91 |
+
from pandas.plotting import boxplot_frame_groupby
|
92 |
+
|
93 |
+
if TYPE_CHECKING:
|
94 |
+
from collections.abc import (
|
95 |
+
Hashable,
|
96 |
+
Mapping,
|
97 |
+
Sequence,
|
98 |
+
)
|
99 |
+
|
100 |
+
from pandas._typing import (
|
101 |
+
ArrayLike,
|
102 |
+
Axis,
|
103 |
+
AxisInt,
|
104 |
+
CorrelationMethod,
|
105 |
+
FillnaOptions,
|
106 |
+
IndexLabel,
|
107 |
+
Manager,
|
108 |
+
Manager2D,
|
109 |
+
SingleManager,
|
110 |
+
TakeIndexer,
|
111 |
+
)
|
112 |
+
|
113 |
+
from pandas import Categorical
|
114 |
+
from pandas.core.generic import NDFrame
|
115 |
+
|
116 |
+
# TODO(typing) the return value on this callable should be any *scalar*.
|
117 |
+
AggScalar = Union[str, Callable[..., Any]]
|
118 |
+
# TODO: validate types on ScalarResult and move to _typing
|
119 |
+
# Blocked from using by https://github.com/python/mypy/issues/1484
|
120 |
+
# See note at _mangle_lambda_list
|
121 |
+
ScalarResult = TypeVar("ScalarResult")
|
122 |
+
|
123 |
+
|
124 |
+
class NamedAgg(NamedTuple):
|
125 |
+
"""
|
126 |
+
Helper for column specific aggregation with control over output column names.
|
127 |
+
|
128 |
+
Subclass of typing.NamedTuple.
|
129 |
+
|
130 |
+
Parameters
|
131 |
+
----------
|
132 |
+
column : Hashable
|
133 |
+
Column label in the DataFrame to apply aggfunc.
|
134 |
+
aggfunc : function or str
|
135 |
+
Function to apply to the provided column. If string, the name of a built-in
|
136 |
+
pandas function.
|
137 |
+
|
138 |
+
Examples
|
139 |
+
--------
|
140 |
+
>>> df = pd.DataFrame({"key": [1, 1, 2], "a": [-1, 0, 1], 1: [10, 11, 12]})
|
141 |
+
>>> agg_a = pd.NamedAgg(column="a", aggfunc="min")
|
142 |
+
>>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x))
|
143 |
+
>>> df.groupby("key").agg(result_a=agg_a, result_1=agg_1)
|
144 |
+
result_a result_1
|
145 |
+
key
|
146 |
+
1 -1 10.5
|
147 |
+
2 1 12.0
|
148 |
+
"""
|
149 |
+
|
150 |
+
column: Hashable
|
151 |
+
aggfunc: AggScalar
|
152 |
+
|
153 |
+
|
154 |
+
class SeriesGroupBy(GroupBy[Series]):
|
155 |
+
def _wrap_agged_manager(self, mgr: Manager) -> Series:
|
156 |
+
out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes)
|
157 |
+
out._name = self.obj.name
|
158 |
+
return out
|
159 |
+
|
160 |
+
def _get_data_to_aggregate(
|
161 |
+
self, *, numeric_only: bool = False, name: str | None = None
|
162 |
+
) -> SingleManager:
|
163 |
+
ser = self._obj_with_exclusions
|
164 |
+
single = ser._mgr
|
165 |
+
if numeric_only and not is_numeric_dtype(ser.dtype):
|
166 |
+
# GH#41291 match Series behavior
|
167 |
+
kwd_name = "numeric_only"
|
168 |
+
raise TypeError(
|
169 |
+
f"Cannot use {kwd_name}=True with "
|
170 |
+
f"{type(self).__name__}.{name} and non-numeric dtypes."
|
171 |
+
)
|
172 |
+
return single
|
173 |
+
|
174 |
+
_agg_examples_doc = dedent(
|
175 |
+
"""
|
176 |
+
Examples
|
177 |
+
--------
|
178 |
+
>>> s = pd.Series([1, 2, 3, 4])
|
179 |
+
|
180 |
+
>>> s
|
181 |
+
0 1
|
182 |
+
1 2
|
183 |
+
2 3
|
184 |
+
3 4
|
185 |
+
dtype: int64
|
186 |
+
|
187 |
+
>>> s.groupby([1, 1, 2, 2]).min()
|
188 |
+
1 1
|
189 |
+
2 3
|
190 |
+
dtype: int64
|
191 |
+
|
192 |
+
>>> s.groupby([1, 1, 2, 2]).agg('min')
|
193 |
+
1 1
|
194 |
+
2 3
|
195 |
+
dtype: int64
|
196 |
+
|
197 |
+
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
|
198 |
+
min max
|
199 |
+
1 1 2
|
200 |
+
2 3 4
|
201 |
+
|
202 |
+
The output column names can be controlled by passing
|
203 |
+
the desired column names and aggregations as keyword arguments.
|
204 |
+
|
205 |
+
>>> s.groupby([1, 1, 2, 2]).agg(
|
206 |
+
... minimum='min',
|
207 |
+
... maximum='max',
|
208 |
+
... )
|
209 |
+
minimum maximum
|
210 |
+
1 1 2
|
211 |
+
2 3 4
|
212 |
+
|
213 |
+
.. versionchanged:: 1.3.0
|
214 |
+
|
215 |
+
The resulting dtype will reflect the return value of the aggregating function.
|
216 |
+
|
217 |
+
>>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
|
218 |
+
1 1.0
|
219 |
+
2 3.0
|
220 |
+
dtype: float64
|
221 |
+
"""
|
222 |
+
)
|
223 |
+
|
224 |
+
@Appender(
|
225 |
+
_apply_docs["template"].format(
|
226 |
+
input="series", examples=_apply_docs["series_examples"]
|
227 |
+
)
|
228 |
+
)
|
229 |
+
def apply(self, func, *args, **kwargs) -> Series:
|
230 |
+
return super().apply(func, *args, **kwargs)
|
231 |
+
|
232 |
+
@doc(_agg_template_series, examples=_agg_examples_doc, klass="Series")
|
233 |
+
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
|
234 |
+
relabeling = func is None
|
235 |
+
columns = None
|
236 |
+
if relabeling:
|
237 |
+
columns, func = validate_func_kwargs(kwargs)
|
238 |
+
kwargs = {}
|
239 |
+
|
240 |
+
if isinstance(func, str):
|
241 |
+
if maybe_use_numba(engine) and engine is not None:
|
242 |
+
# Not all agg functions support numba, only propagate numba kwargs
|
243 |
+
# if user asks for numba, and engine is not None
|
244 |
+
# (if engine is None, the called function will handle the case where
|
245 |
+
# numba is requested via the global option)
|
246 |
+
kwargs["engine"] = engine
|
247 |
+
if engine_kwargs is not None:
|
248 |
+
kwargs["engine_kwargs"] = engine_kwargs
|
249 |
+
return getattr(self, func)(*args, **kwargs)
|
250 |
+
|
251 |
+
elif isinstance(func, abc.Iterable):
|
252 |
+
# Catch instances of lists / tuples
|
253 |
+
# but not the class list / tuple itself.
|
254 |
+
func = maybe_mangle_lambdas(func)
|
255 |
+
kwargs["engine"] = engine
|
256 |
+
kwargs["engine_kwargs"] = engine_kwargs
|
257 |
+
ret = self._aggregate_multiple_funcs(func, *args, **kwargs)
|
258 |
+
if relabeling:
|
259 |
+
# columns is not narrowed by mypy from relabeling flag
|
260 |
+
assert columns is not None # for mypy
|
261 |
+
ret.columns = columns
|
262 |
+
if not self.as_index:
|
263 |
+
ret = ret.reset_index()
|
264 |
+
return ret
|
265 |
+
|
266 |
+
else:
|
267 |
+
cyfunc = com.get_cython_func(func)
|
268 |
+
if cyfunc and not args and not kwargs:
|
269 |
+
warn_alias_replacement(self, func, cyfunc)
|
270 |
+
return getattr(self, cyfunc)()
|
271 |
+
|
272 |
+
if maybe_use_numba(engine):
|
273 |
+
return self._aggregate_with_numba(
|
274 |
+
func, *args, engine_kwargs=engine_kwargs, **kwargs
|
275 |
+
)
|
276 |
+
|
277 |
+
if self.ngroups == 0:
|
278 |
+
# e.g. test_evaluate_with_empty_groups without any groups to
|
279 |
+
# iterate over, we have no output on which to do dtype
|
280 |
+
# inference. We default to using the existing dtype.
|
281 |
+
# xref GH#51445
|
282 |
+
obj = self._obj_with_exclusions
|
283 |
+
return self.obj._constructor(
|
284 |
+
[],
|
285 |
+
name=self.obj.name,
|
286 |
+
index=self._grouper.result_index,
|
287 |
+
dtype=obj.dtype,
|
288 |
+
)
|
289 |
+
|
290 |
+
if self._grouper.nkeys > 1:
|
291 |
+
return self._python_agg_general(func, *args, **kwargs)
|
292 |
+
|
293 |
+
try:
|
294 |
+
return self._python_agg_general(func, *args, **kwargs)
|
295 |
+
except KeyError:
|
296 |
+
# KeyError raised in test_groupby.test_basic is bc the func does
|
297 |
+
# a dictionary lookup on group.name, but group name is not
|
298 |
+
# pinned in _python_agg_general, only in _aggregate_named
|
299 |
+
result = self._aggregate_named(func, *args, **kwargs)
|
300 |
+
|
301 |
+
warnings.warn(
|
302 |
+
"Pinning the groupby key to each group in "
|
303 |
+
f"{type(self).__name__}.agg is deprecated, and cases that "
|
304 |
+
"relied on it will raise in a future version. "
|
305 |
+
"If your operation requires utilizing the groupby keys, "
|
306 |
+
"iterate over the groupby object instead.",
|
307 |
+
FutureWarning,
|
308 |
+
stacklevel=find_stack_level(),
|
309 |
+
)
|
310 |
+
|
311 |
+
# result is a dict whose keys are the elements of result_index
|
312 |
+
result = Series(result, index=self._grouper.result_index)
|
313 |
+
result = self._wrap_aggregated_output(result)
|
314 |
+
return result
|
315 |
+
|
316 |
+
agg = aggregate
|
317 |
+
|
318 |
+
def _python_agg_general(self, func, *args, **kwargs):
|
319 |
+
orig_func = func
|
320 |
+
func = com.is_builtin_func(func)
|
321 |
+
if orig_func != func:
|
322 |
+
alias = com._builtin_table_alias[func]
|
323 |
+
warn_alias_replacement(self, orig_func, alias)
|
324 |
+
f = lambda x: func(x, *args, **kwargs)
|
325 |
+
|
326 |
+
obj = self._obj_with_exclusions
|
327 |
+
result = self._grouper.agg_series(obj, f)
|
328 |
+
res = obj._constructor(result, name=obj.name)
|
329 |
+
return self._wrap_aggregated_output(res)
|
330 |
+
|
331 |
+
def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
|
332 |
+
if isinstance(arg, dict):
|
333 |
+
if self.as_index:
|
334 |
+
# GH 15931
|
335 |
+
raise SpecificationError("nested renamer is not supported")
|
336 |
+
else:
|
337 |
+
# GH#50684 - This accidentally worked in 1.x
|
338 |
+
msg = (
|
339 |
+
"Passing a dictionary to SeriesGroupBy.agg is deprecated "
|
340 |
+
"and will raise in a future version of pandas. Pass a list "
|
341 |
+
"of aggregations instead."
|
342 |
+
)
|
343 |
+
warnings.warn(
|
344 |
+
message=msg,
|
345 |
+
category=FutureWarning,
|
346 |
+
stacklevel=find_stack_level(),
|
347 |
+
)
|
348 |
+
arg = list(arg.items())
|
349 |
+
elif any(isinstance(x, (tuple, list)) for x in arg):
|
350 |
+
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
|
351 |
+
else:
|
352 |
+
# list of functions / function names
|
353 |
+
columns = (com.get_callable_name(f) or f for f in arg)
|
354 |
+
arg = zip(columns, arg)
|
355 |
+
|
356 |
+
results: dict[base.OutputKey, DataFrame | Series] = {}
|
357 |
+
with com.temp_setattr(self, "as_index", True):
|
358 |
+
# Combine results using the index, need to adjust index after
|
359 |
+
# if as_index=False (GH#50724)
|
360 |
+
for idx, (name, func) in enumerate(arg):
|
361 |
+
key = base.OutputKey(label=name, position=idx)
|
362 |
+
results[key] = self.aggregate(func, *args, **kwargs)
|
363 |
+
|
364 |
+
if any(isinstance(x, DataFrame) for x in results.values()):
|
365 |
+
from pandas import concat
|
366 |
+
|
367 |
+
res_df = concat(
|
368 |
+
results.values(), axis=1, keys=[key.label for key in results]
|
369 |
+
)
|
370 |
+
return res_df
|
371 |
+
|
372 |
+
indexed_output = {key.position: val for key, val in results.items()}
|
373 |
+
output = self.obj._constructor_expanddim(indexed_output, index=None)
|
374 |
+
output.columns = Index(key.label for key in results)
|
375 |
+
|
376 |
+
return output
|
377 |
+
|
378 |
+
def _wrap_applied_output(
|
379 |
+
self,
|
380 |
+
data: Series,
|
381 |
+
values: list[Any],
|
382 |
+
not_indexed_same: bool = False,
|
383 |
+
is_transform: bool = False,
|
384 |
+
) -> DataFrame | Series:
|
385 |
+
"""
|
386 |
+
Wrap the output of SeriesGroupBy.apply into the expected result.
|
387 |
+
|
388 |
+
Parameters
|
389 |
+
----------
|
390 |
+
data : Series
|
391 |
+
Input data for groupby operation.
|
392 |
+
values : List[Any]
|
393 |
+
Applied output for each group.
|
394 |
+
not_indexed_same : bool, default False
|
395 |
+
Whether the applied outputs are not indexed the same as the group axes.
|
396 |
+
|
397 |
+
Returns
|
398 |
+
-------
|
399 |
+
DataFrame or Series
|
400 |
+
"""
|
401 |
+
if len(values) == 0:
|
402 |
+
# GH #6265
|
403 |
+
if is_transform:
|
404 |
+
# GH#47787 see test_group_on_empty_multiindex
|
405 |
+
res_index = data.index
|
406 |
+
else:
|
407 |
+
res_index = self._grouper.result_index
|
408 |
+
|
409 |
+
return self.obj._constructor(
|
410 |
+
[],
|
411 |
+
name=self.obj.name,
|
412 |
+
index=res_index,
|
413 |
+
dtype=data.dtype,
|
414 |
+
)
|
415 |
+
assert values is not None
|
416 |
+
|
417 |
+
if isinstance(values[0], dict):
|
418 |
+
# GH #823 #24880
|
419 |
+
index = self._grouper.result_index
|
420 |
+
res_df = self.obj._constructor_expanddim(values, index=index)
|
421 |
+
res_df = self._reindex_output(res_df)
|
422 |
+
# if self.observed is False,
|
423 |
+
# keep all-NaN rows created while re-indexing
|
424 |
+
res_ser = res_df.stack(future_stack=True)
|
425 |
+
res_ser.name = self.obj.name
|
426 |
+
return res_ser
|
427 |
+
elif isinstance(values[0], (Series, DataFrame)):
|
428 |
+
result = self._concat_objects(
|
429 |
+
values,
|
430 |
+
not_indexed_same=not_indexed_same,
|
431 |
+
is_transform=is_transform,
|
432 |
+
)
|
433 |
+
if isinstance(result, Series):
|
434 |
+
result.name = self.obj.name
|
435 |
+
if not self.as_index and not_indexed_same:
|
436 |
+
result = self._insert_inaxis_grouper(result)
|
437 |
+
result.index = default_index(len(result))
|
438 |
+
return result
|
439 |
+
else:
|
440 |
+
# GH #6265 #24880
|
441 |
+
result = self.obj._constructor(
|
442 |
+
data=values, index=self._grouper.result_index, name=self.obj.name
|
443 |
+
)
|
444 |
+
if not self.as_index:
|
445 |
+
result = self._insert_inaxis_grouper(result)
|
446 |
+
result.index = default_index(len(result))
|
447 |
+
return self._reindex_output(result)
|
448 |
+
|
449 |
+
def _aggregate_named(self, func, *args, **kwargs):
|
450 |
+
# Note: this is very similar to _aggregate_series_pure_python,
|
451 |
+
# but that does not pin group.name
|
452 |
+
result = {}
|
453 |
+
initialized = False
|
454 |
+
|
455 |
+
for name, group in self._grouper.get_iterator(
|
456 |
+
self._obj_with_exclusions, axis=self.axis
|
457 |
+
):
|
458 |
+
# needed for pandas/tests/groupby/test_groupby.py::test_basic_aggregations
|
459 |
+
object.__setattr__(group, "name", name)
|
460 |
+
|
461 |
+
output = func(group, *args, **kwargs)
|
462 |
+
output = ops.extract_result(output)
|
463 |
+
if not initialized:
|
464 |
+
# We only do this validation on the first iteration
|
465 |
+
ops.check_result_array(output, group.dtype)
|
466 |
+
initialized = True
|
467 |
+
result[name] = output
|
468 |
+
|
469 |
+
return result
|
470 |
+
|
471 |
+
__examples_series_doc = dedent(
|
472 |
+
"""
|
473 |
+
>>> ser = pd.Series([390.0, 350.0, 30.0, 20.0],
|
474 |
+
... index=["Falcon", "Falcon", "Parrot", "Parrot"],
|
475 |
+
... name="Max Speed")
|
476 |
+
>>> grouped = ser.groupby([1, 1, 2, 2])
|
477 |
+
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
|
478 |
+
Falcon 0.707107
|
479 |
+
Falcon -0.707107
|
480 |
+
Parrot 0.707107
|
481 |
+
Parrot -0.707107
|
482 |
+
Name: Max Speed, dtype: float64
|
483 |
+
|
484 |
+
Broadcast result of the transformation
|
485 |
+
|
486 |
+
>>> grouped.transform(lambda x: x.max() - x.min())
|
487 |
+
Falcon 40.0
|
488 |
+
Falcon 40.0
|
489 |
+
Parrot 10.0
|
490 |
+
Parrot 10.0
|
491 |
+
Name: Max Speed, dtype: float64
|
492 |
+
|
493 |
+
>>> grouped.transform("mean")
|
494 |
+
Falcon 370.0
|
495 |
+
Falcon 370.0
|
496 |
+
Parrot 25.0
|
497 |
+
Parrot 25.0
|
498 |
+
Name: Max Speed, dtype: float64
|
499 |
+
|
500 |
+
.. versionchanged:: 1.3.0
|
501 |
+
|
502 |
+
The resulting dtype will reflect the return value of the passed ``func``,
|
503 |
+
for example:
|
504 |
+
|
505 |
+
>>> grouped.transform(lambda x: x.astype(int).max())
|
506 |
+
Falcon 390
|
507 |
+
Falcon 390
|
508 |
+
Parrot 30
|
509 |
+
Parrot 30
|
510 |
+
Name: Max Speed, dtype: int64
|
511 |
+
"""
|
512 |
+
)
|
513 |
+
|
514 |
+
@Substitution(klass="Series", example=__examples_series_doc)
|
515 |
+
@Appender(_transform_template)
|
516 |
+
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
|
517 |
+
return self._transform(
|
518 |
+
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
|
519 |
+
)
|
520 |
+
|
521 |
+
def _cython_transform(
|
522 |
+
self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs
|
523 |
+
):
|
524 |
+
assert axis == 0 # handled by caller
|
525 |
+
|
526 |
+
obj = self._obj_with_exclusions
|
527 |
+
|
528 |
+
try:
|
529 |
+
result = self._grouper._cython_operation(
|
530 |
+
"transform", obj._values, how, axis, **kwargs
|
531 |
+
)
|
532 |
+
except NotImplementedError as err:
|
533 |
+
# e.g. test_groupby_raises_string
|
534 |
+
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
|
535 |
+
|
536 |
+
return obj._constructor(result, index=self.obj.index, name=obj.name)
|
537 |
+
|
538 |
+
def _transform_general(
|
539 |
+
self, func: Callable, engine, engine_kwargs, *args, **kwargs
|
540 |
+
) -> Series:
|
541 |
+
"""
|
542 |
+
Transform with a callable `func`.
|
543 |
+
"""
|
544 |
+
if maybe_use_numba(engine):
|
545 |
+
return self._transform_with_numba(
|
546 |
+
func, *args, engine_kwargs=engine_kwargs, **kwargs
|
547 |
+
)
|
548 |
+
assert callable(func)
|
549 |
+
klass = type(self.obj)
|
550 |
+
|
551 |
+
results = []
|
552 |
+
for name, group in self._grouper.get_iterator(
|
553 |
+
self._obj_with_exclusions, axis=self.axis
|
554 |
+
):
|
555 |
+
# this setattr is needed for test_transform_lambda_with_datetimetz
|
556 |
+
object.__setattr__(group, "name", name)
|
557 |
+
res = func(group, *args, **kwargs)
|
558 |
+
|
559 |
+
results.append(klass(res, index=group.index))
|
560 |
+
|
561 |
+
# check for empty "results" to avoid concat ValueError
|
562 |
+
if results:
|
563 |
+
from pandas.core.reshape.concat import concat
|
564 |
+
|
565 |
+
concatenated = concat(results)
|
566 |
+
result = self._set_result_index_ordered(concatenated)
|
567 |
+
else:
|
568 |
+
result = self.obj._constructor(dtype=np.float64)
|
569 |
+
|
570 |
+
result.name = self.obj.name
|
571 |
+
return result
|
572 |
+
|
573 |
+
def filter(self, func, dropna: bool = True, *args, **kwargs):
|
574 |
+
"""
|
575 |
+
Filter elements from groups that don't satisfy a criterion.
|
576 |
+
|
577 |
+
Elements from groups are filtered if they do not satisfy the
|
578 |
+
boolean criterion specified by func.
|
579 |
+
|
580 |
+
Parameters
|
581 |
+
----------
|
582 |
+
func : function
|
583 |
+
Criterion to apply to each group. Should return True or False.
|
584 |
+
dropna : bool
|
585 |
+
Drop groups that do not pass the filter. True by default; if False,
|
586 |
+
groups that evaluate False are filled with NaNs.
|
587 |
+
|
588 |
+
Returns
|
589 |
+
-------
|
590 |
+
Series
|
591 |
+
|
592 |
+
Notes
|
593 |
+
-----
|
594 |
+
Functions that mutate the passed object can produce unexpected
|
595 |
+
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
|
596 |
+
for more details.
|
597 |
+
|
598 |
+
Examples
|
599 |
+
--------
|
600 |
+
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
|
601 |
+
... 'foo', 'bar'],
|
602 |
+
... 'B' : [1, 2, 3, 4, 5, 6],
|
603 |
+
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
|
604 |
+
>>> grouped = df.groupby('A')
|
605 |
+
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
|
606 |
+
1 2
|
607 |
+
3 4
|
608 |
+
5 6
|
609 |
+
Name: B, dtype: int64
|
610 |
+
"""
|
611 |
+
if isinstance(func, str):
|
612 |
+
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
|
613 |
+
else:
|
614 |
+
wrapper = lambda x: func(x, *args, **kwargs)
|
615 |
+
|
616 |
+
# Interpret np.nan as False.
|
617 |
+
def true_and_notna(x) -> bool:
|
618 |
+
b = wrapper(x)
|
619 |
+
return notna(b) and b
|
620 |
+
|
621 |
+
try:
|
622 |
+
indices = [
|
623 |
+
self._get_index(name)
|
624 |
+
for name, group in self._grouper.get_iterator(
|
625 |
+
self._obj_with_exclusions, axis=self.axis
|
626 |
+
)
|
627 |
+
if true_and_notna(group)
|
628 |
+
]
|
629 |
+
except (ValueError, TypeError) as err:
|
630 |
+
raise TypeError("the filter must return a boolean result") from err
|
631 |
+
|
632 |
+
filtered = self._apply_filter(indices, dropna)
|
633 |
+
return filtered
|
634 |
+
|
635 |
+
    def nunique(self, dropna: bool = True) -> Series | DataFrame:
        """
        Return number of unique elements in the group.

        Returns
        -------
        Series
            Number of unique values within each group.

        Examples
        --------
        For SeriesGroupBy:

        >>> lst = ['a', 'a', 'b', 'b']
        >>> ser = pd.Series([1, 2, 3, 3], index=lst)
        >>> ser
        a    1
        a    2
        b    3
        b    3
        dtype: int64
        >>> ser.groupby(level=0).nunique()
        a    2
        b    1
        dtype: int64

        For Resampler:

        >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
        >>> ser
        2023-01-01    1
        2023-01-15    2
        2023-02-01    3
        2023-02-15    3
        dtype: int64
        >>> ser.resample('MS').nunique()
        2023-01-01    2
        2023-02-01    1
        Freq: MS, dtype: int64
        """
        ids, _, ngroups = self._grouper.group_info
        val = self.obj._values
        codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)

        if self._grouper.has_dropped_na:
            mask = ids >= 0
            ids = ids[mask]
            codes = codes[mask]

        group_index = get_group_index(
            labels=[ids, codes],
            shape=(ngroups, len(uniques)),
            sort=False,
            xnull=dropna,
        )

        if dropna:
            mask = group_index >= 0
            if (~mask).any():
                ids = ids[mask]
                group_index = group_index[mask]

        mask = duplicated(group_index, "first")
        res = np.bincount(ids[~mask], minlength=ngroups)
        res = ensure_int64(res)

        ri = self._grouper.result_index
        result: Series | DataFrame = self.obj._constructor(
            res, index=ri, name=self.obj.name
        )
        if not self.as_index:
            result = self._insert_inaxis_grouper(result)
            result.index = default_index(len(result))
        return self._reindex_output(result, fill_value=0)

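    # Worked sketch (illustrative values, not from the original source) of the
    # counting trick above: each (group id, value code) pair is collapsed into
    # a single integer key, duplicate keys are masked out, and a bincount over
    # the surviving group ids yields per-group unique counts in one pass.
    #
    #   ids   = np.array([0, 0, 1, 1])   # groups: a, a, b, b
    #   codes = np.array([0, 1, 2, 2])   # factorized values: 1, 2, 3, 3
    #   key   = ids * 3 + codes          # get_group_index with shape (2, 3)
    #   # key == [0, 1, 5, 5]; duplicated(key, "first") masks the second 5,
    #   # np.bincount([0, 0, 1], minlength=2) -> [2, 1], i.e. a: 2, b: 1
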
    @doc(Series.describe)
    def describe(self, percentiles=None, include=None, exclude=None) -> Series:
        return super().describe(
            percentiles=percentiles, include=include, exclude=exclude
        )

    def value_counts(
        self,
        normalize: bool = False,
        sort: bool = True,
        ascending: bool = False,
        bins=None,
        dropna: bool = True,
    ) -> Series | DataFrame:
        name = "proportion" if normalize else "count"

        if bins is None:
            result = self._value_counts(
                normalize=normalize, sort=sort, ascending=ascending, dropna=dropna
            )
            result.name = name
            return result

        from pandas.core.reshape.merge import get_join_indexers
        from pandas.core.reshape.tile import cut

        ids, _, _ = self._grouper.group_info
        val = self.obj._values

        index_names = self._grouper.names + [self.obj.name]

        if isinstance(val.dtype, CategoricalDtype) or (
            bins is not None and not np.iterable(bins)
        ):
            # scalar bins cannot be done at top level
            # in a backward compatible way
            # GH38672 relates to categorical dtype
            ser = self.apply(
                Series.value_counts,
                normalize=normalize,
                sort=sort,
                ascending=ascending,
                bins=bins,
            )
            ser.name = name
            ser.index.names = index_names
            return ser

        # groupby removes null keys from groupings
        mask = ids != -1
        ids, val = ids[mask], val[mask]

        lab: Index | np.ndarray
        if bins is None:
            lab, lev = algorithms.factorize(val, sort=True)
            llab = lambda lab, inc: lab[inc]
        else:
            # lab is a Categorical with categories an IntervalIndex
            cat_ser = cut(Series(val, copy=False), bins, include_lowest=True)
            cat_obj = cast("Categorical", cat_ser._values)
            lev = cat_obj.categories
            lab = lev.take(
                cat_obj.codes,
                allow_fill=True,
                fill_value=lev._na_value,
            )
            llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]

        if isinstance(lab.dtype, IntervalDtype):
            # TODO: should we do this inside II?
            lab_interval = cast(Interval, lab)

            sorter = np.lexsort((lab_interval.left, lab_interval.right, ids))
        else:
            sorter = np.lexsort((lab, ids))

        ids, lab = ids[sorter], lab[sorter]

        # group boundaries are where group ids change
        idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
        idx = np.r_[0, idchanges]
        if not len(ids):
            idx = idchanges

        # new values are where sorted labels change
        lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
        inc = np.r_[True, lchanges]
        if not len(val):
            inc = lchanges
        inc[idx] = True  # group boundaries are also new values
        out = np.diff(np.nonzero(np.r_[inc, True])[0])  # value counts

        # num. of times each group should be repeated
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))

        # multi-index components
        codes = self._grouper.reconstructed_codes
        codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
        levels = [ping._group_index for ping in self._grouper.groupings] + [lev]

        if dropna:
            mask = codes[-1] != -1
            if mask.all():
                dropna = False
            else:
                out, codes = out[mask], [level_codes[mask] for level_codes in codes]

        if normalize:
            out = out.astype("float")
            d = np.diff(np.r_[idx, len(ids)])
            if dropna:
                m = ids[lab == -1]
                np.add.at(d, m, -1)
                acc = rep(d)[mask]
            else:
                acc = rep(d)
            out /= acc

        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, codes[-1] = out[sorter], codes[-1][sorter]

        if bins is not None:
            # for compat. with libgroupby.value_counts need to ensure every
            # bin is present at every index level, null filled with zeros
            diff = np.zeros(len(out), dtype="bool")
            for level_codes in codes[:-1]:
                diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]

            ncat, nbin = diff.sum(), len(levels[-1])

            left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]

            right = [diff.cumsum() - 1, codes[-1]]

            # error: Argument 1 to "get_join_indexers" has incompatible type
            # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray,
            # ndarray[Any, Any]], Index, Series]]"
            _, idx = get_join_indexers(
                left, right, sort=False, how="left"  # type: ignore[arg-type]
            )
            if idx is not None:
                out = np.where(idx != -1, out[idx], 0)

            if sort:
                sorter = np.lexsort((out if ascending else -out, left[0]))
                out, left[-1] = out[sorter], left[-1][sorter]

            # build the multi-index w/ full levels
            def build_codes(lev_codes: np.ndarray) -> np.ndarray:
                return np.repeat(lev_codes[diff], nbin)

            codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
            codes.append(left[-1])

        mi = MultiIndex(
            levels=levels, codes=codes, names=index_names, verify_integrity=False
        )

        if is_integer_dtype(out.dtype):
            out = ensure_int64(out)
        result = self.obj._constructor(out, index=mi, name=name)
        if not self.as_index:
            result = result.reset_index()
        return result

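    # Worked sketch (illustrative values, not from the original source) of the
    # run-length step above. After sorting by (group id, value): ``idx`` marks
    # where the group id changes, ``inc`` marks where a new value starts, and
    # the diff of the positions where ``inc`` is True is the count of each
    # (group, value) run.
    #
    #   ids (sorted) = [0, 0, 0, 1, 1]
    #   lab (sorted) = [4, 4, 7, 4, 4]
    #   idx          = [0, 3]                     # group boundaries
    #   inc          = [T, F, T, T, F]            # boundaries forced to True
    #   out = np.diff(np.nonzero(np.r_[inc, True])[0])   # -> [2, 1, 2]
    #   # group 0: value 4 x2, value 7 x1; group 1: value 4 x2
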
    def fillna(
        self,
        value: object | ArrayLike | None = None,
        method: FillnaOptions | None = None,
        axis: Axis | None | lib.NoDefault = lib.no_default,
        inplace: bool = False,
        limit: int | None = None,
        downcast: dict | None | lib.NoDefault = lib.no_default,
    ) -> Series | None:
        """
        Fill NA/NaN values using the specified method within groups.

        .. deprecated:: 2.2.0
            This method is deprecated and will be removed in a future version.
            Use the :meth:`.SeriesGroupBy.ffill` or :meth:`.SeriesGroupBy.bfill`
            for forward or backward filling instead. If you want to fill with a
            single value, use :meth:`Series.fillna` instead.

        Parameters
        ----------
        value : scalar, dict, Series, or DataFrame
            Value to use to fill holes (e.g. 0), alternately a
            dict/Series/DataFrame of values specifying which value to use for
            each index (for a Series) or column (for a DataFrame). Values not
            in the dict/Series/DataFrame will not be filled. This value cannot
            be a list. Users wanting to use the ``value`` argument and not ``method``
            should prefer :meth:`.Series.fillna` as this
            will produce the same result and be more performant.
        method : {{'bfill', 'ffill', None}}, default None
            Method to use for filling holes. ``'ffill'`` will propagate
            the last valid observation forward within a group.
            ``'bfill'`` will use next valid observation to fill the gap.
        axis : {0 or 'index', 1 or 'columns'}
            Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`.
        inplace : bool, default False
            Broken. Do not set to True.
        limit : int, default None
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill within a group. In other words,
            if there is a gap with more than this number of consecutive NaNs,
            it will only be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled. Must be greater than 0 if not None.
        downcast : dict, default is None
            A dict of item->dtype of what to downcast if possible,
            or the string 'infer' which will try to downcast to an appropriate
            equal type (e.g. float64 to int64 if possible).

        Returns
        -------
        Series
            Object with missing values filled within groups.

        See Also
        --------
        ffill : Forward fill values within a group.
        bfill : Backward fill values within a group.

        Examples
        --------
        For SeriesGroupBy:

        >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse']
        >>> ser = pd.Series([1, None, None, 2, None], index=lst)
        >>> ser
        cat      1.0
        cat      NaN
        cat      NaN
        mouse    2.0
        mouse    NaN
        dtype: float64
        >>> ser.groupby(level=0).fillna(0, limit=1)
        cat      1.0
        cat      0.0
        cat      NaN
        mouse    2.0
        mouse    0.0
        dtype: float64
        """
        warnings.warn(
            f"{type(self).__name__}.fillna is deprecated and "
            "will be removed in a future version. Use obj.ffill() or obj.bfill() "
            "for forward or backward filling instead. If you want to fill with a "
            f"single value, use {type(self.obj).__name__}.fillna instead",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        result = self._op_via_apply(
            "fillna",
            value=value,
            method=method,
            axis=axis,
            inplace=inplace,
            limit=limit,
            downcast=downcast,
        )
        return result

    def take(
        self,
        indices: TakeIndexer,
        axis: Axis | lib.NoDefault = lib.no_default,
        **kwargs,
    ) -> Series:
        """
        Return the elements in the given *positional* indices in each group.

        This means that we are not indexing according to actual values in
        the index attribute of the object. We are indexing according to the
        actual position of the element in the object.

        If a requested index does not exist for some group, this method will raise.
        To get similar behavior that ignores indices that don't exist, see
        :meth:`.SeriesGroupBy.nth`.

        Parameters
        ----------
        indices : array-like
            An array of ints indicating which positions to take in each group.
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            The axis on which to select elements. ``0`` means that we are
            selecting rows, ``1`` means that we are selecting columns.
            For `SeriesGroupBy` this parameter is unused and defaults to 0.

            .. deprecated:: 2.1.0
                For axis=1, operate on the underlying object instead. Otherwise
                the axis keyword is not necessary.

        **kwargs
            For compatibility with :meth:`numpy.take`. Has no effect on the
            output.

        Returns
        -------
        Series
            A Series containing the elements taken from each group.

        See Also
        --------
        Series.take : Take elements from a Series along an axis.
        Series.loc : Select a subset of a Series by labels.
        Series.iloc : Select a subset of a Series by positions.
        numpy.take : Take elements from an array along an axis.
        SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist.

        Examples
        --------
        >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
        ...                    ('parrot', 'bird', 24.0),
        ...                    ('lion', 'mammal', 80.5),
        ...                    ('monkey', 'mammal', np.nan),
        ...                    ('rabbit', 'mammal', 15.0)],
        ...                   columns=['name', 'class', 'max_speed'],
        ...                   index=[4, 3, 2, 1, 0])
        >>> df
             name   class  max_speed
        4  falcon    bird      389.0
        3  parrot    bird       24.0
        2    lion  mammal       80.5
        1  monkey  mammal        NaN
        0  rabbit  mammal       15.0
        >>> gb = df["name"].groupby([1, 1, 2, 2, 2])

        Take elements at positions 0 and 1 along the axis 0 in each group (default).

        >>> gb.take([0, 1])
        1  4    falcon
           3    parrot
        2  2      lion
           1    monkey
        Name: name, dtype: object

        We may take elements using negative integers for positive indices,
        starting from the end of the object, just like with Python lists.

        >>> gb.take([-1, -2])
        1  3    parrot
           4    falcon
        2  0    rabbit
           1    monkey
        Name: name, dtype: object
        """
        result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)
        return result

    def skew(
        self,
        axis: Axis | lib.NoDefault = lib.no_default,
        skipna: bool = True,
        numeric_only: bool = False,
        **kwargs,
    ) -> Series:
        """
        Return unbiased skew within groups.

        Normalized by N-1.

        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            Axis for the function to be applied on.
            This parameter is only for compatibility with DataFrame and is unused.

            .. deprecated:: 2.1.0
                For axis=1, operate on the underlying object instead. Otherwise
                the axis keyword is not necessary.

        skipna : bool, default True
            Exclude NA/null values when computing the result.

        numeric_only : bool, default False
            Include only float, int, boolean columns. Not implemented for Series.

        **kwargs
            Additional keyword arguments to be passed to the function.

        Returns
        -------
        Series

        See Also
        --------
        Series.skew : Return unbiased skew over requested axis.

        Examples
        --------
        >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.],
        ...                 index=['Falcon', 'Falcon', 'Falcon', 'Falcon',
        ...                        'Parrot', 'Parrot', 'Parrot'],
        ...                 name="Max Speed")
        >>> ser
        Falcon    390.0
        Falcon    350.0
        Falcon    357.0
        Falcon      NaN
        Parrot     22.0
        Parrot     20.0
        Parrot     30.0
        Name: Max Speed, dtype: float64
        >>> ser.groupby(level=0).skew()
        Falcon    1.525174
        Parrot    1.457863
        Name: Max Speed, dtype: float64
        >>> ser.groupby(level=0).skew(skipna=False)
        Falcon         NaN
        Parrot    1.457863
        Name: Max Speed, dtype: float64
        """
        if axis is lib.no_default:
            axis = 0

        if axis != 0:
            result = self._op_via_apply(
                "skew",
                axis=axis,
                skipna=skipna,
                numeric_only=numeric_only,
                **kwargs,
            )
            return result

        def alt(obj):
            # This should not be reached since the cython path should raise
            # TypeError and not NotImplementedError.
            raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")

        return self._cython_agg_general(
            "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
        )

    @property
    @doc(Series.plot.__doc__)
    def plot(self) -> GroupByPlot:
        result = GroupByPlot(self)
        return result

    @doc(Series.nlargest.__doc__)
    def nlargest(
        self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
    ) -> Series:
        f = partial(Series.nlargest, n=n, keep=keep)
        data = self._obj_with_exclusions
        # Don't change behavior if result index happens to be the same, i.e.
        # already ordered and n >= all group sizes.
        result = self._python_apply_general(f, data, not_indexed_same=True)
        return result

    @doc(Series.nsmallest.__doc__)
    def nsmallest(
        self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
    ) -> Series:
        f = partial(Series.nsmallest, n=n, keep=keep)
        data = self._obj_with_exclusions
        # Don't change behavior if result index happens to be the same, i.e.
        # already ordered and n >= all group sizes.
        result = self._python_apply_general(f, data, not_indexed_same=True)
        return result

    @doc(Series.idxmin.__doc__)
    def idxmin(
        self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True
    ) -> Series:
        return self._idxmax_idxmin("idxmin", axis=axis, skipna=skipna)

    @doc(Series.idxmax.__doc__)
    def idxmax(
        self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True
    ) -> Series:
        return self._idxmax_idxmin("idxmax", axis=axis, skipna=skipna)

    @doc(Series.corr.__doc__)
    def corr(
        self,
        other: Series,
        method: CorrelationMethod = "pearson",
        min_periods: int | None = None,
    ) -> Series:
        result = self._op_via_apply(
            "corr", other=other, method=method, min_periods=min_periods
        )
        return result

    @doc(Series.cov.__doc__)
    def cov(
        self, other: Series, min_periods: int | None = None, ddof: int | None = 1
    ) -> Series:
        result = self._op_via_apply(
            "cov", other=other, min_periods=min_periods, ddof=ddof
        )
        return result

    @property
    def is_monotonic_increasing(self) -> Series:
        """
        Return whether each group's values are monotonically increasing.

        Returns
        -------
        Series

        Examples
        --------
        >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
        >>> s.groupby(level=0).is_monotonic_increasing
        Falcon    False
        Parrot     True
        dtype: bool
        """
        return self.apply(lambda ser: ser.is_monotonic_increasing)

    @property
    def is_monotonic_decreasing(self) -> Series:
        """
        Return whether each group's values are monotonically decreasing.

        Returns
        -------
        Series

        Examples
        --------
        >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
        >>> s.groupby(level=0).is_monotonic_decreasing
        Falcon     True
        Parrot    False
        dtype: bool
        """
        return self.apply(lambda ser: ser.is_monotonic_decreasing)

    @doc(Series.hist.__doc__)
    def hist(
        self,
        by=None,
        ax=None,
        grid: bool = True,
        xlabelsize: int | None = None,
        xrot: float | None = None,
        ylabelsize: int | None = None,
        yrot: float | None = None,
        figsize: tuple[int, int] | None = None,
        bins: int | Sequence[int] = 10,
        backend: str | None = None,
        legend: bool = False,
        **kwargs,
    ):
        result = self._op_via_apply(
            "hist",
            by=by,
            ax=ax,
            grid=grid,
            xlabelsize=xlabelsize,
            xrot=xrot,
            ylabelsize=ylabelsize,
            yrot=yrot,
            figsize=figsize,
            bins=bins,
            backend=backend,
            legend=legend,
            **kwargs,
        )
        return result

    @property
    @doc(Series.dtype.__doc__)
    def dtype(self) -> Series:
        return self.apply(lambda ser: ser.dtype)

    def unique(self) -> Series:
        """
        Return unique values for each group.

        It returns unique values for each of the grouped values. Returned in
        order of appearance. Hash table-based unique, therefore does NOT sort.

        Returns
        -------
        Series
            Unique values for each of the grouped values.

        See Also
        --------
        Series.unique : Return unique values of Series object.

        Examples
        --------
        >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1),
        ...                    ('Beagle', 'dog', 15.2),
        ...                    ('Chihuahua', 'dog', 6.9),
        ...                    ('Persian', 'cat', 9.2),
        ...                    ('Chihuahua', 'dog', 7),
        ...                    ('Persian', 'cat', 8.8)],
        ...                   columns=['breed', 'animal', 'height_in'])
        >>> df
               breed  animal  height_in
        0  Chihuahua     dog        6.1
        1     Beagle     dog       15.2
        2  Chihuahua     dog        6.9
        3    Persian     cat        9.2
        4  Chihuahua     dog        7.0
        5    Persian     cat        8.8
        >>> ser = df.groupby('animal')['breed'].unique()
        >>> ser
        animal
        cat              [Persian]
        dog    [Chihuahua, Beagle]
        Name: breed, dtype: object
        """
        result = self._op_via_apply("unique")
        return result


class DataFrameGroupBy(GroupBy[DataFrame]):
    _agg_examples_doc = dedent(
        """
    Examples
    --------
    >>> data = {"A": [1, 1, 2, 2],
    ...         "B": [1, 2, 3, 4],
    ...         "C": [0.362838, 0.227877, 1.267767, -0.562860]}
    >>> df = pd.DataFrame(data)
    >>> df
       A  B         C
    0  1  1  0.362838
    1  1  2  0.227877
    2  2  3  1.267767
    3  2  4 -0.562860

    The aggregation is for each column.

    >>> df.groupby('A').agg('min')
       B         C
    A
    1  1  0.227877
    2  3 -0.562860

    Multiple aggregations

    >>> df.groupby('A').agg(['min', 'max'])
        B             C
      min max       min       max
    A
    1   1   2  0.227877  0.362838
    2   3   4 -0.562860  1.267767

    Select a column for aggregation

    >>> df.groupby('A').B.agg(['min', 'max'])
       min  max
    A
    1    1    2
    2    3    4

    User-defined function for aggregation

    >>> df.groupby('A').agg(lambda x: sum(x) + 2)
       B         C
    A
    1  5  2.590715
    2  9  2.704907

    Different aggregations per column

    >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
        B             C
      min max       sum
    A
    1   1   2  0.590715
    2   3   4  0.704907

    To control the output names with different aggregations per column,
    pandas supports "named aggregation"

    >>> df.groupby("A").agg(
    ...     b_min=pd.NamedAgg(column="B", aggfunc="min"),
    ...     c_sum=pd.NamedAgg(column="C", aggfunc="sum")
    ... )
       b_min     c_sum
    A
    1      1  0.590715
    2      3  0.704907

    - The keywords are the *output* column names
    - The values are tuples whose first element is the column to select
      and the second element is the aggregation to apply to that column.
      Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
      ``['column', 'aggfunc']`` to make it clearer what the arguments are.
      As usual, the aggregation can be a callable or a string alias.

    See :ref:`groupby.aggregate.named` for more.

    .. versionchanged:: 1.3.0

        The resulting dtype will reflect the return value of the aggregating function.

    >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())
          B
    A
    1   1.0
    2   3.0
    """
    )

    @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame")
    def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
        relabeling, func, columns, order = reconstruct_func(func, **kwargs)
        func = maybe_mangle_lambdas(func)

        if maybe_use_numba(engine):
            # Not all agg functions support numba, only propagate numba kwargs
            # if user asks for numba
            kwargs["engine"] = engine
            kwargs["engine_kwargs"] = engine_kwargs

        op = GroupByApply(self, func, args=args, kwargs=kwargs)
        result = op.agg()
        if not is_dict_like(func) and result is not None:
            # GH #52849
            if not self.as_index and is_list_like(func):
                return result.reset_index()
            else:
                return result
        elif relabeling:
            # this should be the only (non-raising) case with relabeling
            # used reordered index of columns
            result = cast(DataFrame, result)
            result = result.iloc[:, order]
            result = cast(DataFrame, result)
            # error: Incompatible types in assignment (expression has type
            # "Optional[List[str]]", variable has type
            # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]],
            # Index, Series], Sequence[Any]]")
            result.columns = columns  # type: ignore[assignment]

        if result is None:
            # Remove the kwargs we inserted
            # (already stored in engine, engine_kwargs arguments)
            if "engine" in kwargs:
                del kwargs["engine"]
                del kwargs["engine_kwargs"]
            # at this point func is not a str, list-like, dict-like,
            # or a known callable(e.g. sum)
            if maybe_use_numba(engine):
                return self._aggregate_with_numba(
                    func, *args, engine_kwargs=engine_kwargs, **kwargs
                )
            # grouper specific aggregations
            if self._grouper.nkeys > 1:
                # test_groupby_as_index_series_scalar gets here with 'not self.as_index'
                return self._python_agg_general(func, *args, **kwargs)
            elif args or kwargs:
                # test_pass_args_kwargs gets here (with and without as_index)
                # can't return early
                result = self._aggregate_frame(func, *args, **kwargs)

            elif self.axis == 1:
                # _aggregate_multiple_funcs does not allow self.axis == 1
                # Note: axis == 1 precludes 'not self.as_index', see __init__
                result = self._aggregate_frame(func)
                return result

            else:
                # try to treat as if we are passing a list
                gba = GroupByApply(self, [func], args=(), kwargs={})
                try:
                    result = gba.agg()

                except ValueError as err:
                    if "No objects to concatenate" not in str(err):
                        raise
                    # _aggregate_frame can fail with e.g. func=Series.mode,
                    # where it expects 1D values but would be getting 2D values
                    # In other tests, using aggregate_frame instead of GroupByApply
                    # would give correct values but incorrect dtypes
                    # object vs float64 in test_cython_agg_empty_buckets
                    # float64 vs int64 in test_category_order_apply
                    result = self._aggregate_frame(func)

                else:
                    # GH#32040, GH#35246
                    # e.g. test_groupby_as_index_select_column_sum_empty_df
                    result = cast(DataFrame, result)
                    result.columns = self._obj_with_exclusions.columns.copy()

        if not self.as_index:
            result = self._insert_inaxis_grouper(result)
            result.index = default_index(len(result))

        return result

    agg = aggregate

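    # Illustrative sketch (not part of the original source): for named
    # aggregation, ``reconstruct_func`` unpacks the kwargs into a relabeling
    # flag, a dict-like func spec, the output column names, and the order used
    # by the ``result.iloc[:, order]`` step above. Roughly:
    #
    #   df.groupby("A").agg(b_min=("B", "min"), c_sum=("C", "sum"))
    #   # -> relabeling=True, func={"B": ["min"], "C": ["sum"]},
    #   #    columns=("b_min", "c_sum"), order=[0, 1]
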
    def _python_agg_general(self, func, *args, **kwargs):
        orig_func = func
        func = com.is_builtin_func(func)
        if orig_func != func:
            alias = com._builtin_table_alias[func]
            warn_alias_replacement(self, orig_func, alias)
        f = lambda x: func(x, *args, **kwargs)

        if self.ngroups == 0:
            # e.g. test_evaluate_with_empty_groups different path gets different
            # result dtype in empty case.
            return self._python_apply_general(f, self._selected_obj, is_agg=True)

        obj = self._obj_with_exclusions
        if self.axis == 1:
            obj = obj.T

        if not len(obj.columns):
            # e.g. test_margins_no_values_no_cols
            return self._python_apply_general(f, self._selected_obj)

        output: dict[int, ArrayLike] = {}
        for idx, (name, ser) in enumerate(obj.items()):
            result = self._grouper.agg_series(ser, f)
            output[idx] = result

        res = self.obj._constructor(output)
        res.columns = obj.columns.copy(deep=False)
        return self._wrap_aggregated_output(res)

    def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
        if self._grouper.nkeys != 1:
            raise AssertionError("Number of keys must be 1")

        obj = self._obj_with_exclusions

        result: dict[Hashable, NDFrame | np.ndarray] = {}
        for name, grp_df in self._grouper.get_iterator(obj, self.axis):
            fres = func(grp_df, *args, **kwargs)
            result[name] = fres

        result_index = self._grouper.result_index
        other_ax = obj.axes[1 - self.axis]
        out = self.obj._constructor(result, index=other_ax, columns=result_index)
        if self.axis == 0:
            out = out.T

        return out

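    # Illustrative sketch (not from the original source): this path is reached
    # with a single grouping key plus positional/keyword args, e.g.
    # ``df.groupby("key").agg(lambda g, off: g.sum() + off, 1)``. The dict
    # built above maps each group label to ``func``'s result; the constructor
    # places group labels on the column axis, and the final ``.T`` moves them
    # to the row axis so the output is indexed by group.
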
    def _wrap_applied_output(
        self,
        data: DataFrame,
        values: list,
        not_indexed_same: bool = False,
        is_transform: bool = False,
    ):
        if len(values) == 0:
            if is_transform:
                # GH#47787 see test_group_on_empty_multiindex
                res_index = data.index
            else:
                res_index = self._grouper.result_index

            result = self.obj._constructor(index=res_index, columns=data.columns)
            result = result.astype(data.dtypes, copy=False)
            return result

        # GH12824
        # using values[0] here breaks test_groupby_apply_none_first
        first_not_none = next(com.not_none(*values), None)

        if first_not_none is None:
            # GH9684 - All values are None, return an empty frame.
            return self.obj._constructor()
        elif isinstance(first_not_none, DataFrame):
            return self._concat_objects(
                values,
                not_indexed_same=not_indexed_same,
                is_transform=is_transform,
            )

        key_index = self._grouper.result_index if self.as_index else None

        if isinstance(first_not_none, (np.ndarray, Index)):
            # GH#1738: values is list of arrays of unequal lengths
            # fall through to the outer else clause
            # TODO: sure this is right? we used to do this
            # after raising AttributeError above
            # GH 18930
            if not is_hashable(self._selection):
                # error: Need type annotation for "name"
                name = tuple(self._selection)  # type: ignore[var-annotated, arg-type]
            else:
                # error: Incompatible types in assignment
                # (expression has type "Hashable", variable
                # has type "Tuple[Any, ...]")
                name = self._selection  # type: ignore[assignment]
            return self.obj._constructor_sliced(values, index=key_index, name=name)
        elif not isinstance(first_not_none, Series):
            # values are not series or array-like but scalars
            # self._selection not passed through to Series as the
            # result should not take the name of original selection
            # of columns
            if self.as_index:
                return self.obj._constructor_sliced(values, index=key_index)
            else:
                result = self.obj._constructor(values, columns=[self._selection])
                result = self._insert_inaxis_grouper(result)
                return result
        else:
            # values are Series
            return self._wrap_applied_output_series(
                values,
                not_indexed_same,
                first_not_none,
                key_index,
                is_transform,
            )

    def _wrap_applied_output_series(
        self,
        values: list[Series],
        not_indexed_same: bool,
        first_not_none,
        key_index: Index | None,
        is_transform: bool,
    ) -> DataFrame | Series:
        kwargs = first_not_none._construct_axes_dict()
        backup = Series(**kwargs)
        values = [x if (x is not None) else backup for x in values]

        all_indexed_same = all_indexes_same(x.index for x in values)

        if not all_indexed_same:
            # GH 8467
            return self._concat_objects(
                values,
                not_indexed_same=True,
                is_transform=is_transform,
            )

        # Combine values
        # vstack+constructor is faster than concat and handles MI-columns
        stacked_values = np.vstack([np.asarray(v) for v in values])

        if self.axis == 0:
            index = key_index
            columns = first_not_none.index.copy()
            if columns.name is None:
                # GH6124 - propagate name of Series when it's consistent
                names = {v.name for v in values}
                if len(names) == 1:
                    columns.name = next(iter(names))
        else:
            index = first_not_none.index
            columns = key_index
            stacked_values = stacked_values.T

        if stacked_values.dtype == object:
            # We'll have the DataFrame constructor do inference
            stacked_values = stacked_values.tolist()
        result = self.obj._constructor(stacked_values, index=index, columns=columns)

        if not self.as_index:
            result = self._insert_inaxis_grouper(result)

        return self._reindex_output(result)

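    # Illustrative sketch (not from the original source): when every group's
    # ``func`` returns a Series on the same index, the results are stacked
    # row-wise and handed to the DataFrame constructor in one shot, which is
    # cheaper than ``concat`` and keeps MultiIndex columns intact:
    #
    #   values = [pd.Series([1, 2], index=["x", "y"]),
    #             pd.Series([3, 4], index=["x", "y"])]
    #   np.vstack([np.asarray(v) for v in values])  # -> [[1, 2], [3, 4]]
    #   # rows = one per group (key_index), columns = the shared Series index
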
    def _cython_transform(
        self,
        how: str,
        numeric_only: bool = False,
        axis: AxisInt = 0,
        **kwargs,
    ) -> DataFrame:
        assert axis == 0  # handled by caller

        # With self.axis == 0, we have multi-block tests
        #  e.g. test_rank_min_int, test_cython_transform_frame
        #  test_transform_numeric_ret
        # With self.axis == 1, _get_data_to_aggregate does a transpose
        #  so we always have a single block.
        mgr: Manager2D = self._get_data_to_aggregate(
            numeric_only=numeric_only, name=how
        )

        def arr_func(bvalues: ArrayLike) -> ArrayLike:
            return self._grouper._cython_operation(
                "transform", bvalues, how, 1, **kwargs
            )

        # We could use `mgr.apply` here and not have to set_axis, but
        #  we would have to do shape gymnastics for ArrayManager compat
        res_mgr = mgr.grouped_reduce(arr_func)
        res_mgr.set_axis(1, mgr.axes[1])

        res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes)
        res_df = self._maybe_transpose_result(res_df)
        return res_df

    def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs):
        if maybe_use_numba(engine):
            return self._transform_with_numba(
                func, *args, engine_kwargs=engine_kwargs, **kwargs
            )
        from pandas.core.reshape.concat import concat

        applied = []
        obj = self._obj_with_exclusions
        gen = self._grouper.get_iterator(obj, axis=self.axis)
        fast_path, slow_path = self._define_paths(func, *args, **kwargs)

        # Determine whether to use slow or fast path by evaluating on the first group.
        # Need to handle the case of an empty generator and process the result so that
        # it does not need to be computed again.
        try:
            name, group = next(gen)
        except StopIteration:
            pass
        else:
            # 2023-02-27 No tests broken by disabling this pinning
            object.__setattr__(group, "name", name)
            try:
                path, res = self._choose_path(fast_path, slow_path, group)
            except ValueError as err:
                # e.g. test_transform_with_non_scalar_group
                msg = "transform must return a scalar value for each group"
                raise ValueError(msg) from err
            if group.size > 0:
                res = _wrap_transform_general_frame(self.obj, group, res)
                applied.append(res)

        # Compute and process with the remaining groups
        for name, group in gen:
            if group.size == 0:
                continue
            # 2023-02-27 No tests broken by disabling this pinning
            object.__setattr__(group, "name", name)
            res = path(group)

            res = _wrap_transform_general_frame(self.obj, group, res)
            applied.append(res)

        concat_index = obj.columns if self.axis == 0 else obj.index
        other_axis = 1 if self.axis == 0 else 0  # switches between 0 & 1
        concatenated = concat(applied, axis=self.axis, verify_integrity=False)
        concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
        return self._set_result_index_ordered(concatenated)

    __examples_dataframe_doc = dedent(
        """
    >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
    ...                           'foo', 'bar'],
    ...                    'B' : ['one', 'one', 'two', 'three',
    ...                           'two', 'two'],
    ...                    'C' : [1, 5, 5, 2, 5, 5],
    ...                    'D' : [2.0, 5., 8., 1., 2., 9.]})
    >>> grouped = df.groupby('A')[['C', 'D']]
    >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
              C         D
    0 -1.154701 -0.577350
    1  0.577350  0.000000
    2  0.577350  1.154701
    3 -1.154701 -1.000000
    4  0.577350 -0.577350
    5  0.577350  1.000000

    Broadcast result of the transformation

    >>> grouped.transform(lambda x: x.max() - x.min())
         C    D
    0  4.0  6.0
    1  3.0  8.0
    2  4.0  6.0
    3  3.0  8.0
    4  4.0  6.0
    5  3.0  8.0

    >>> grouped.transform("mean")
              C    D
    0  3.666667  4.0
    1  4.000000  5.0
    2  3.666667  4.0
    3  4.000000  5.0
    4  3.666667  4.0
    5  4.000000  5.0

    .. versionchanged:: 1.3.0

    The resulting dtype will reflect the return value of the passed ``func``,
    for example:

    >>> grouped.transform(lambda x: x.astype(int).max())
       C  D
    0  5  8
    1  5  9
    2  5  8
    3  5  9
    4  5  8
    5  5  9
    """
    )

    @Substitution(klass="DataFrame", example=__examples_dataframe_doc)
    @Appender(_transform_template)
    def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
        return self._transform(
            func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
        )

    def _define_paths(self, func, *args, **kwargs):
        if isinstance(func, str):
            fast_path = lambda group: getattr(group, func)(*args, **kwargs)
            slow_path = lambda group: group.apply(
                lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
            )
        else:
            fast_path = lambda group: func(group, *args, **kwargs)
            slow_path = lambda group: group.apply(
                lambda x: func(x, *args, **kwargs), axis=self.axis
            )
        return fast_path, slow_path

    def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
        path = slow_path
        res = slow_path(group)

        if self.ngroups == 1:
            # no need to evaluate multiple paths when only
            # a single group exists
            return path, res

        # if we make it here, test if we can use the fast path
        try:
            res_fast = fast_path(group)
        except AssertionError:
            raise  # pragma: no cover
        except Exception:
            # GH#29631 For user-defined function, we can't predict what may be
            # raised; see test_transform.test_transform_fastpath_raises
            return path, res

        # verify fast path returns either:
        # a DataFrame with columns equal to group.columns
        # OR a Series with index equal to group.columns
        if isinstance(res_fast, DataFrame):
            if not res_fast.columns.equals(group.columns):
                return path, res
        elif isinstance(res_fast, Series):
            if not res_fast.index.equals(group.columns):
                return path, res
        else:
            return path, res

        if res_fast.equals(res):
            path = fast_path

        return path, res

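    # Illustrative sketch (not from the original source) of the fast/slow
    # equivalence test above: for ``transform("mean")`` the fast path calls
    # ``group.mean()`` directly while the slow path applies it column by
    # column; both are evaluated on the first group, and the fast path is
    # only adopted when the two results agree in labels and values.
    #
    #   fast_path(group)  # group.mean(): Series indexed by group.columns
    #   slow_path(group)  # group.apply(lambda x: x.mean(), axis=self.axis)
    #   # res_fast.equals(res) -> True here, so ``path`` becomes fast_path
    #   # for the remaining groups.
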
    def filter(self, func, dropna: bool = True, *args, **kwargs):
        """
        Filter elements from groups that don't satisfy a criterion.

        Elements from groups are filtered if they do not satisfy the
        boolean criterion specified by func.

        Parameters
        ----------
        func : function
            Criterion to apply to each group. Should return True or False.
        dropna : bool
            Drop groups that do not pass the filter. True by default; if False,
            groups that evaluate False are filled with NaNs.

        Returns
        -------
        DataFrame

        Notes
        -----
        Each subframe is endowed the attribute 'name' in case you need to know
        which group you are working on.

        Functions that mutate the passed object can produce unexpected
        behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
        for more details.

        Examples
        --------
        >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
        ...                           'foo', 'bar'],
        ...                    'B' : [1, 2, 3, 4, 5, 6],
        ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
        >>> grouped = df.groupby('A')
        >>> grouped.filter(lambda x: x['B'].mean() > 3.)
             A  B    C
        1  bar  2  5.0
        3  bar  4  1.0
        5  bar  6  9.0
        """
        indices = []

        obj = self._selected_obj
        gen = self._grouper.get_iterator(obj, axis=self.axis)

        for name, group in gen:
            # 2023-02-27 no tests are broken by this pinning, but it is
            # documented in the docstring above.
            object.__setattr__(group, "name", name)

            res = func(group, *args, **kwargs)

            try:
                res = res.squeeze()
            except AttributeError:  # allow e.g., scalars and frames to pass
                pass

            # interpret the result of the filter
            if is_bool(res) or (is_scalar(res) and isna(res)):
                if notna(res) and res:
                    indices.append(self._get_index(name))
            else:
                # non scalars aren't allowed
                raise TypeError(
                    f"filter function returned a {type(res).__name__}, "
                    "but expected a scalar bool"
                )

        return self._apply_filter(indices, dropna)

    def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:
        if self.axis == 1:
            # GH 37725
            raise ValueError("Cannot subset columns when using axis=1")
        # per GH 23566
        if isinstance(key, tuple) and len(key) > 1:
            # if len == 1, then it becomes a SeriesGroupBy and this is actually
            # valid syntax, so don't raise
            raise ValueError(
                "Cannot subset columns with a tuple with more than one element. "
                "Use a list instead."
            )
        return super().__getitem__(key)

    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        if ndim == 2:
            if subset is None:
                subset = self.obj
            return DataFrameGroupBy(
                subset,
                self.keys,
                axis=self.axis,
                level=self.level,
                grouper=self._grouper,
                exclusions=self.exclusions,
                selection=key,
                as_index=self.as_index,
                sort=self.sort,
                group_keys=self.group_keys,
                observed=self.observed,
                dropna=self.dropna,
            )
        elif ndim == 1:
            if subset is None:
                subset = self.obj[key]
            return SeriesGroupBy(
                subset,
                self.keys,
                level=self.level,
                grouper=self._grouper,
                exclusions=self.exclusions,
                selection=key,
                as_index=self.as_index,
                sort=self.sort,
                group_keys=self.group_keys,
                observed=self.observed,
                dropna=self.dropna,
            )

        raise AssertionError("invalid ndim for _gotitem")

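    # Illustrative sketch (not from the original source): column selection on
    # a groupby funnels through ``_gotitem`` with the requested dimensionality,
    # so both lines below reuse the same grouper and options instead of
    # regrouping:
    #
    #   gb = df.groupby("key")
    #   gb["col"]            # ndim=1 -> SeriesGroupBy   (via __getitem__)
    #   gb[["col", "col2"]]  # ndim=2 -> DataFrameGroupBy
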
    def _get_data_to_aggregate(
        self, *, numeric_only: bool = False, name: str | None = None
    ) -> Manager2D:
        obj = self._obj_with_exclusions
        if self.axis == 1:
            mgr = obj.T._mgr
        else:
            mgr = obj._mgr

        if numeric_only:
            mgr = mgr.get_numeric_data()
        return mgr

    def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
        return self.obj._constructor_from_mgr(mgr, axes=mgr.axes)

    def _apply_to_column_groupbys(self, func) -> DataFrame:
        from pandas.core.reshape.concat import concat

        obj = self._obj_with_exclusions
        columns = obj.columns
        sgbs = [
            SeriesGroupBy(
                obj.iloc[:, i],
                selection=colname,
                grouper=self._grouper,
                exclusions=self.exclusions,
                observed=self.observed,
            )
            for i, colname in enumerate(obj.columns)
        ]
        results = [func(sgb) for sgb in sgbs]

        if not len(results):
            # concat would raise
            res_df = DataFrame([], columns=columns, index=self._grouper.result_index)
        else:
            res_df = concat(results, keys=columns, axis=1)

        if not self.as_index:
            res_df.index = default_index(len(res_df))
            res_df = self._insert_inaxis_grouper(res_df)
        return res_df

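    # Illustrative sketch (not from the original source): DataFrame-level
    # reductions such as ``nunique`` below are decomposed into one
    # SeriesGroupBy per column sharing the parent's grouper, reduced
    # independently, then glued back under the original column labels:
    #
    #   results = [func(sgb) for sgb in sgbs]   # one reduced Series per column
    #   concat(results, keys=columns, axis=1)   # columns restored in order
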
    def nunique(self, dropna: bool = True) -> DataFrame:
        """
        Return DataFrame with counts of unique elements in each position.

        Parameters
        ----------
        dropna : bool, default True
            Don't include NaN in the counts.

        Returns
        -------
        nunique: DataFrame

        Examples
        --------
        >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
        ...                           'ham', 'ham'],
        ...                    'value1': [1, 5, 5, 2, 5, 5],
        ...                    'value2': list('abbaxy')})
        >>> df
             id  value1 value2
        0  spam       1      a
        1   egg       5      b
        2   egg       5      b
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y

        >>> df.groupby('id').nunique()
              value1  value2
        id
        egg        1       1
        ham        1       2
        spam       2       1

        Check for rows with the same id but conflicting values:

        >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
             id  value1 value2
        0  spam       1      a
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y
        """

        if self.axis != 0:
            # see test_groupby_crash_on_nunique
            return self._python_apply_general(
                lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True
            )

        return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna))

    def idxmax(
        self,
        axis: Axis | None | lib.NoDefault = lib.no_default,
        skipna: bool = True,
        numeric_only: bool = False,
    ) -> DataFrame:
        """
        Return index of first occurrence of maximum over requested axis.

        NA/null values are excluded.

        Parameters
        ----------
        axis : {{0 or 'index', 1 or 'columns'}}, default None
            The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
            If axis is not provided, grouper's axis is used.

            .. versionchanged:: 2.0.0

            .. deprecated:: 2.1.0
                For axis=1, operate on the underlying object instead. Otherwise
                the axis keyword is not necessary.

        skipna : bool, default True
            Exclude NA/null values. If an entire row/column is NA, the result
            will be NA.
        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

        Returns
        -------
        Series
            Indexes of maxima along the specified axis.

        Raises
        ------
        ValueError
            * If the row/column is empty

        See Also
        --------
        Series.idxmax : Return index of the maximum element.

        Notes
        -----
        This method is the DataFrame version of ``ndarray.argmax``.

        Examples
        --------
        Consider a dataset containing food consumption in Argentina.

        >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
        ...                    'co2_emissions': [37.2, 19.66, 1712]},
        ...                   index=['Pork', 'Wheat Products', 'Beef'])

        >>> df
                        consumption  co2_emissions
        Pork                  10.51          37.20
        Wheat Products       103.11          19.66
        Beef                  55.48        1712.00

        By default, it returns the index for the maximum value in each column.

        >>> df.idxmax()
        consumption     Wheat Products
        co2_emissions             Beef
        dtype: object

        To return the index for the maximum value in each row, use ``axis="columns"``.

        >>> df.idxmax(axis="columns")
        Pork              co2_emissions
        Wheat Products      consumption
        Beef              co2_emissions
        dtype: object
        """
        return self._idxmax_idxmin(
            "idxmax", axis=axis, numeric_only=numeric_only, skipna=skipna
        )

def idxmin(
|
2182 |
+
self,
|
2183 |
+
axis: Axis | None | lib.NoDefault = lib.no_default,
|
2184 |
+
skipna: bool = True,
|
2185 |
+
numeric_only: bool = False,
|
2186 |
+
) -> DataFrame:
|
2187 |
+
"""
|
2188 |
+
Return index of first occurrence of minimum over requested axis.
|
2189 |
+
|
2190 |
+
NA/null values are excluded.
|
2191 |
+
|
2192 |
+
Parameters
|
2193 |
+
----------
|
2194 |
+
axis : {{0 or 'index', 1 or 'columns'}}, default None
|
2195 |
+
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
|
2196 |
+
If axis is not provided, grouper's axis is used.
|
2197 |
+
|
2198 |
+
.. versionchanged:: 2.0.0
|
2199 |
+
|
2200 |
+
.. deprecated:: 2.1.0
|
2201 |
+
For axis=1, operate on the underlying object instead. Otherwise
|
2202 |
+
the axis keyword is not necessary.
|
2203 |
+
|
2204 |
+
skipna : bool, default True
|
2205 |
+
Exclude NA/null values. If an entire row/column is NA, the result
|
2206 |
+
will be NA.
|
2207 |
+
numeric_only : bool, default False
|
2208 |
+
Include only `float`, `int` or `boolean` data.
|
2209 |
+
|
2210 |
+
.. versionadded:: 1.5.0
|
2211 |
+
|
2212 |
+
Returns
|
2213 |
+
-------
|
2214 |
+
Series
|
2215 |
+
Indexes of minima along the specified axis.
|
2216 |
+
|
2217 |
+
Raises
|
2218 |
+
------
|
2219 |
+
ValueError
|
2220 |
+
* If the row/column is empty
|
2221 |
+
|
2222 |
+
See Also
|
2223 |
+
--------
|
2224 |
+
Series.idxmin : Return index of the minimum element.
|
2225 |
+
|
2226 |
+
Notes
|
2227 |
+
-----
|
2228 |
+
This method is the DataFrame version of ``ndarray.argmin``.
|
2229 |
+
|
2230 |
+
Examples
|
2231 |
+
--------
|
2232 |
+
Consider a dataset containing food consumption in Argentina.
|
2233 |
+
|
2234 |
+
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
|
2235 |
+
... 'co2_emissions': [37.2, 19.66, 1712]},
|
2236 |
+
... index=['Pork', 'Wheat Products', 'Beef'])
|
2237 |
+
|
2238 |
+
>>> df
|
2239 |
+
consumption co2_emissions
|
2240 |
+
Pork 10.51 37.20
|
2241 |
+
Wheat Products 103.11 19.66
|
2242 |
+
Beef 55.48 1712.00
|
2243 |
+
|
2244 |
+
By default, it returns the index for the minimum value in each column.
|
2245 |
+
|
2246 |
+
>>> df.idxmin()
|
2247 |
+
consumption Pork
|
2248 |
+
co2_emissions Wheat Products
|
2249 |
+
dtype: object
|
2250 |
+
|
2251 |
+
To return the index for the minimum value in each row, use ``axis="columns"``.
|
2252 |
+
|
2253 |
+
>>> df.idxmin(axis="columns")
|
2254 |
+
Pork consumption
|
2255 |
+
Wheat Products co2_emissions
|
2256 |
+
Beef consumption
|
2257 |
+
dtype: object
|
2258 |
+
"""
|
2259 |
+
return self._idxmax_idxmin(
|
2260 |
+
"idxmin", axis=axis, numeric_only=numeric_only, skipna=skipna
|
2261 |
+
)
|
2262 |
+
|
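Both reductions above funnel into the shared `_idxmax_idxmin` helper, so at the group level they differ only in the extremum taken. A minimal doctest-style sketch (this small frame is illustrative, not taken from the docstrings above):

>>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 3.0, 2.0]})
>>> df.groupby("key").idxmax()
     val
key
a      1
b      2
>>> df.groupby("key").idxmin()
     val
key
a      0
b      2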
+    boxplot = boxplot_frame_groupby
+
+    def value_counts(
+        self,
+        subset: Sequence[Hashable] | None = None,
+        normalize: bool = False,
+        sort: bool = True,
+        ascending: bool = False,
+        dropna: bool = True,
+    ) -> DataFrame | Series:
+        """
+        Return a Series or DataFrame containing counts of unique rows.
+
+        .. versionadded:: 1.4.0
+
+        Parameters
+        ----------
+        subset : list-like, optional
+            Columns to use when counting unique combinations.
+        normalize : bool, default False
+            Return proportions rather than frequencies.
+        sort : bool, default True
+            Sort by frequencies.
+        ascending : bool, default False
+            Sort in ascending order.
+        dropna : bool, default True
+            Don't include counts of rows that contain NA values.
+
+        Returns
+        -------
+        Series or DataFrame
+            Series if the groupby as_index is True, otherwise DataFrame.
+
+        See Also
+        --------
+        Series.value_counts: Equivalent method on Series.
+        DataFrame.value_counts: Equivalent method on DataFrame.
+        SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.
+
+        Notes
+        -----
+        - If the groupby as_index is True then the returned Series will have a
+          MultiIndex with one level per input column.
+        - If the groupby as_index is False then the returned DataFrame will have an
+          additional column with the value_counts. The column is labelled 'count' or
+          'proportion', depending on the ``normalize`` parameter.
+
+        By default, rows that contain any NA values are omitted from
+        the result.
+
+        By default, the result will be in descending order so that the
+        first element of each group is the most frequently-occurring row.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({
+        ...     'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
+        ...     'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
+        ...     'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']
+        ... })
+
+        >>> df
+                gender  education   country
+        0       male    low         US
+        1       male    medium      FR
+        2       female  high        US
+        3       male    low         FR
+        4       female  high        FR
+        5       male    low         FR
+
+        >>> df.groupby('gender').value_counts()
+        gender  education  country
+        female  high       FR         1
+                           US         1
+        male    low        FR         2
+                           US         1
+                medium     FR         1
+        Name: count, dtype: int64
+
+        >>> df.groupby('gender').value_counts(ascending=True)
+        gender  education  country
+        female  high       FR         1
+                           US         1
+        male    low        US         1
+                medium     FR         1
+                low        FR         2
+        Name: count, dtype: int64
+
+        >>> df.groupby('gender').value_counts(normalize=True)
+        gender  education  country
+        female  high       FR         0.50
+                           US         0.50
+        male    low        FR         0.50
+                           US         0.25
+                medium     FR         0.25
+        Name: proportion, dtype: float64
+
+        >>> df.groupby('gender', as_index=False).value_counts()
+           gender education country  count
+        0  female      high      FR      1
+        1  female      high      US      1
+        2    male       low      FR      2
+        3    male       low      US      1
+        4    male    medium      FR      1
+
+        >>> df.groupby('gender', as_index=False).value_counts(normalize=True)
+           gender education country  proportion
+        0  female      high      FR        0.50
+        1  female      high      US        0.50
+        2    male       low      FR        0.50
+        3    male       low      US        0.25
+        4    male    medium      FR        0.25
+        """
+        return self._value_counts(subset, normalize, sort, ascending, dropna)
+
+    def fillna(
+        self,
+        value: Hashable | Mapping | Series | DataFrame | None = None,
+        method: FillnaOptions | None = None,
+        axis: Axis | None | lib.NoDefault = lib.no_default,
+        inplace: bool = False,
+        limit: int | None = None,
+        downcast=lib.no_default,
+    ) -> DataFrame | None:
+        """
+        Fill NA/NaN values using the specified method within groups.
+
+        .. deprecated:: 2.2.0
+            This method is deprecated and will be removed in a future version.
+            Use :meth:`.DataFrameGroupBy.ffill` or :meth:`.DataFrameGroupBy.bfill`
+            for forward or backward filling instead. If you want to fill with a
+            single value, use :meth:`DataFrame.fillna` instead.
+
+        Parameters
+        ----------
+        value : scalar, dict, Series, or DataFrame
+            Value to use to fill holes (e.g. 0), alternately a
+            dict/Series/DataFrame of values specifying which value to use for
+            each index (for a Series) or column (for a DataFrame). Values not
+            in the dict/Series/DataFrame will not be filled. This value cannot
+            be a list. Users wanting to use the ``value`` argument and not ``method``
+            should prefer :meth:`.DataFrame.fillna` as this
+            will produce the same result and be more performant.
+        method : {'bfill', 'ffill', None}, default None
+            Method to use for filling holes. ``'ffill'`` will propagate
+            the last valid observation forward within a group.
+            ``'bfill'`` will use next valid observation to fill the gap.
+        axis : {0 or 'index', 1 or 'columns'}
+            Axis along which to fill missing values. When the :class:`DataFrameGroupBy`
+            ``axis`` argument is ``0``, using ``axis=1`` here will produce
+            the same results as :meth:`.DataFrame.fillna`. When the
+            :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0``
+            or ``axis=1`` here will produce the same results.
+        inplace : bool, default False
+            Broken. Do not set to True.
+        limit : int, default None
+            If method is specified, this is the maximum number of consecutive
+            NaN values to forward/backward fill within a group. In other words,
+            if there is a gap with more than this number of consecutive NaNs,
+            it will only be partially filled. If method is not specified, this is the
+            maximum number of entries along the entire axis where NaNs will be
+            filled. Must be greater than 0 if not None.
+        downcast : dict, default is None
+            A dict of item->dtype of what to downcast if possible,
+            or the string 'infer' which will try to downcast to an appropriate
+            equal type (e.g. float64 to int64 if possible).
+
+        Returns
+        -------
+        DataFrame
+            Object with missing values filled.
+
+        See Also
+        --------
+        ffill : Forward fill values within a group.
+        bfill : Backward fill values within a group.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame(
+        ...     {
+        ...         "key": [0, 0, 1, 1, 1],
+        ...         "A": [np.nan, 2, np.nan, 3, np.nan],
+        ...         "B": [2, 3, np.nan, np.nan, np.nan],
+        ...         "C": [np.nan, np.nan, 2, np.nan, np.nan],
+        ...     }
+        ... )
+        >>> df
+           key    A    B   C
+        0    0  NaN  2.0 NaN
+        1    0  2.0  3.0 NaN
+        2    1  NaN  NaN 2.0
+        3    1  3.0  NaN NaN
+        4    1  NaN  NaN NaN
+
+        Propagate non-null values forward or backward within each group along columns.
+
+        >>> df.groupby("key").fillna(method="ffill")
+             A    B   C
+        0  NaN  2.0 NaN
+        1  2.0  3.0 NaN
+        2  NaN  NaN 2.0
+        3  3.0  NaN 2.0
+        4  3.0  NaN 2.0
+
+        >>> df.groupby("key").fillna(method="bfill")
+             A    B   C
+        0  2.0  2.0 NaN
+        1  2.0  3.0 NaN
+        2  3.0  NaN 2.0
+        3  3.0  NaN NaN
+        4  NaN  NaN NaN
+
+        Propagate non-null values forward or backward within each group along rows.
+
+        >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="ffill").T
+           key    A    B    C
+        0  0.0  0.0  2.0  2.0
+        1  0.0  2.0  3.0  3.0
+        2  1.0  1.0  NaN  2.0
+        3  1.0  3.0  NaN  NaN
+        4  1.0  1.0  NaN  NaN
+
+        >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="bfill").T
+           key    A    B    C
+        0  0.0  NaN  2.0  NaN
+        1  0.0  2.0  3.0  NaN
+        2  1.0  NaN  2.0  2.0
+        3  1.0  3.0  NaN  NaN
+        4  1.0  NaN  NaN  NaN
+
+        Only replace the first NaN element within a group along rows.
+
+        >>> df.groupby("key").fillna(method="ffill", limit=1)
+             A    B   C
+        0  NaN  2.0 NaN
+        1  2.0  3.0 NaN
+        2  NaN  NaN 2.0
+        3  3.0  NaN 2.0
+        4  3.0  NaN NaN
+        """
+        warnings.warn(
+            f"{type(self).__name__}.fillna is deprecated and "
+            "will be removed in a future version. Use obj.ffill() or obj.bfill() "
+            "for forward or backward filling instead. If you want to fill with a "
+            f"single value, use {type(self.obj).__name__}.fillna instead",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+
+        result = self._op_via_apply(
+            "fillna",
+            value=value,
+            method=method,
+            axis=axis,
+            inplace=inplace,
+            limit=limit,
+            downcast=downcast,
+        )
+        return result
+
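Given the 2.2.0 deprecation above, the migration the warning suggests is mechanical; a sketch using the `df` from the docstring (outputs omitted):

>>> df.groupby("key").ffill()          # replaces fillna(method="ffill")
>>> df.groupby("key").bfill(limit=1)   # replaces fillna(method="bfill", limit=1)
>>> df.fillna(0)                       # for a single fill value, use DataFrame.fillna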
+    def take(
+        self,
+        indices: TakeIndexer,
+        axis: Axis | None | lib.NoDefault = lib.no_default,
+        **kwargs,
+    ) -> DataFrame:
+        """
+        Return the elements in the given *positional* indices in each group.
+
+        This means that we are not indexing according to actual values in
+        the index attribute of the object. We are indexing according to the
+        actual position of the element in the object.
+
+        If a requested index does not exist for some group, this method will raise.
+        To get similar behavior that ignores indices that don't exist, see
+        :meth:`.DataFrameGroupBy.nth`.
+
+        Parameters
+        ----------
+        indices : array-like
+            An array of ints indicating which positions to take.
+        axis : {0 or 'index', 1 or 'columns', None}, default 0
+            The axis on which to select elements. ``0`` means that we are
+            selecting rows, ``1`` means that we are selecting columns.
+
+            .. deprecated:: 2.1.0
+                For axis=1, operate on the underlying object instead. Otherwise
+                the axis keyword is not necessary.
+
+        **kwargs
+            For compatibility with :meth:`numpy.take`. Has no effect on the
+            output.
+
+        Returns
+        -------
+        DataFrame
+            A DataFrame containing the elements taken from each group.
+
+        See Also
+        --------
+        DataFrame.take : Take elements from a DataFrame along an axis.
+        DataFrame.loc : Select a subset of a DataFrame by labels.
+        DataFrame.iloc : Select a subset of a DataFrame by positions.
+        numpy.take : Take elements from an array along an axis.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
+        ...                    ('parrot', 'bird', 24.0),
+        ...                    ('lion', 'mammal', 80.5),
+        ...                    ('monkey', 'mammal', np.nan),
+        ...                    ('rabbit', 'mammal', 15.0)],
+        ...                   columns=['name', 'class', 'max_speed'],
+        ...                   index=[4, 3, 2, 1, 0])
+        >>> df
+             name   class  max_speed
+        4  falcon    bird      389.0
+        3  parrot    bird       24.0
+        2    lion  mammal       80.5
+        1  monkey  mammal        NaN
+        0  rabbit  mammal       15.0
+        >>> gb = df.groupby([1, 1, 2, 2, 2])
+
+        Take elements at positions 0 and 1 along the axis 0 (default).
+
+        Note how the indices selected in the result do not correspond to
+        our input indices 0 and 1. That's because we are selecting the 0th
+        and 1st rows, not rows whose indices equal 0 and 1.
+
+        >>> gb.take([0, 1])
+               name   class  max_speed
+        1 4  falcon    bird      389.0
+          3  parrot    bird       24.0
+        2 2    lion  mammal       80.5
+          1  monkey  mammal        NaN
+
+        The order of the specified indices influences the order in the result.
+        Here, the order is swapped from the previous example.
+
+        >>> gb.take([1, 0])
+               name   class  max_speed
+        1 3  parrot    bird       24.0
+          4  falcon    bird      389.0
+        2 1  monkey  mammal        NaN
+          2    lion  mammal       80.5
+
+        We may take elements using negative integers for positive indices,
+        starting from the end of the object, just like with Python lists.
+
+        >>> gb.take([-1, -2])
+               name   class  max_speed
+        1 3  parrot    bird       24.0
+          4  falcon    bird      389.0
+        2 0  rabbit  mammal       15.0
+          1  monkey  mammal        NaN
+        """
+        result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)
+        return result
+
+    def skew(
+        self,
+        axis: Axis | None | lib.NoDefault = lib.no_default,
+        skipna: bool = True,
+        numeric_only: bool = False,
+        **kwargs,
+    ) -> DataFrame:
+        """
+        Return unbiased skew within groups.
+
+        Normalized by N-1.
+
+        Parameters
+        ----------
+        axis : {0 or 'index', 1 or 'columns', None}, default 0
+            Axis for the function to be applied on.
+
+            Specifying ``axis=None`` will apply the aggregation across both axes.
+
+            .. versionadded:: 2.0.0
+
+            .. deprecated:: 2.1.0
+                For axis=1, operate on the underlying object instead. Otherwise
+                the axis keyword is not necessary.
+
+        skipna : bool, default True
+            Exclude NA/null values when computing the result.
+
+        numeric_only : bool, default False
+            Include only float, int, boolean columns.
+
+        **kwargs
+            Additional keyword arguments to be passed to the function.
+
+        Returns
+        -------
+        DataFrame
+
+        See Also
+        --------
+        DataFrame.skew : Return unbiased skew over requested axis.
+
+        Examples
+        --------
+        >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi',
+        ...            'lion', 'monkey', 'rabbit'],
+        ...           ['bird', 'bird', 'bird', 'bird',
+        ...            'mammal', 'mammal', 'mammal']]
+        >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class'))
+        >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan,
+        ...                                  80.5, 21.5, 15.0]},
+        ...                   index=index)
+        >>> df
+                         max_speed
+        name     class
+        falcon   bird        389.0
+        parrot   bird         24.0
+        cockatoo bird         70.0
+        kiwi     bird          NaN
+        lion     mammal        80.5
+        monkey   mammal        21.5
+        rabbit   mammal        15.0
+        >>> gb = df.groupby(["class"])
+        >>> gb.skew()
+                max_speed
+        class
+        bird     1.628296
+        mammal   1.669046
+        >>> gb.skew(skipna=False)
+                max_speed
+        class
+        bird          NaN
+        mammal   1.669046
+        """
+        if axis is lib.no_default:
+            axis = 0
+
+        if axis != 0:
+            result = self._op_via_apply(
+                "skew",
+                axis=axis,
+                skipna=skipna,
+                numeric_only=numeric_only,
+                **kwargs,
+            )
+            return result
+
+        def alt(obj):
+            # This should not be reached since the cython path should raise
+            # TypeError and not NotImplementedError.
+            raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")
+
+        return self._cython_agg_general(
+            "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
+        )
+
+    @property
+    @doc(DataFrame.plot.__doc__)
+    def plot(self) -> GroupByPlot:
+        result = GroupByPlot(self)
+        return result
+
+    @doc(DataFrame.corr.__doc__)
+    def corr(
+        self,
+        method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
+        min_periods: int = 1,
+        numeric_only: bool = False,
+    ) -> DataFrame:
+        result = self._op_via_apply(
+            "corr", method=method, min_periods=min_periods, numeric_only=numeric_only
+        )
+        return result
+
+    @doc(DataFrame.cov.__doc__)
+    def cov(
+        self,
+        min_periods: int | None = None,
+        ddof: int | None = 1,
+        numeric_only: bool = False,
+    ) -> DataFrame:
+        result = self._op_via_apply(
+            "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only
+        )
+        return result
+
+    @doc(DataFrame.hist.__doc__)
+    def hist(
+        self,
+        column: IndexLabel | None = None,
+        by=None,
+        grid: bool = True,
+        xlabelsize: int | None = None,
+        xrot: float | None = None,
+        ylabelsize: int | None = None,
+        yrot: float | None = None,
+        ax=None,
+        sharex: bool = False,
+        sharey: bool = False,
+        figsize: tuple[int, int] | None = None,
+        layout: tuple[int, int] | None = None,
+        bins: int | Sequence[int] = 10,
+        backend: str | None = None,
+        legend: bool = False,
+        **kwargs,
+    ):
+        result = self._op_via_apply(
+            "hist",
+            column=column,
+            by=by,
+            grid=grid,
+            xlabelsize=xlabelsize,
+            xrot=xrot,
+            ylabelsize=ylabelsize,
+            yrot=yrot,
+            ax=ax,
+            sharex=sharex,
+            sharey=sharey,
+            figsize=figsize,
+            layout=layout,
+            bins=bins,
+            backend=backend,
+            legend=legend,
+            **kwargs,
+        )
+        return result
+
+    @property
+    @doc(DataFrame.dtypes.__doc__)
+    def dtypes(self) -> Series:
+        # GH#51045
+        warnings.warn(
+            f"{type(self).__name__}.dtypes is deprecated and will be removed in "
+            "a future version. Check the dtypes on the base object instead",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+
+        # error: Incompatible return value type (got "DataFrame", expected "Series")
+        return self._python_apply_general(  # type: ignore[return-value]
+            lambda df: df.dtypes, self._selected_obj
+        )
+
+    @doc(DataFrame.corrwith.__doc__)
+    def corrwith(
+        self,
+        other: DataFrame | Series,
+        axis: Axis | lib.NoDefault = lib.no_default,
+        drop: bool = False,
+        method: CorrelationMethod = "pearson",
+        numeric_only: bool = False,
+    ) -> DataFrame:
+        result = self._op_via_apply(
+            "corrwith",
+            other=other,
+            axis=axis,
+            drop=drop,
+            method=method,
+            numeric_only=numeric_only,
+        )
+        return result
+
+
+def _wrap_transform_general_frame(
+    obj: DataFrame, group: DataFrame, res: DataFrame | Series
+) -> DataFrame:
+    from pandas import concat
+
+    if isinstance(res, Series):
+        # we need to broadcast across the
+        # other dimension; this will preserve dtypes
+        # GH14457
+        if res.index.is_(obj.index):
+            res_frame = concat([res] * len(group.columns), axis=1)
+            res_frame.columns = group.columns
+            res_frame.index = group.index
+        else:
+            res_frame = obj._constructor(
+                np.tile(res.values, (len(group.index), 1)),
+                columns=group.columns,
+                index=group.index,
+            )
+        assert isinstance(res_frame, DataFrame)
+        return res_frame
+    elif isinstance(res, DataFrame) and not res.index.is_(group.index):
+        return res._align_frame(group)[0]
+    else:
+        return res
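The Series branch of `_wrap_transform_general_frame` broadcasts a per-column result across every row of the group via `np.tile`; a minimal sketch of that reshaping, outside pandas:

>>> import numpy as np
>>> res = np.array([1.0, 2.0])   # one transformed value per column
>>> np.tile(res, (3, 1))         # repeated for each of a group's 3 rows
array([[1., 2.],
       [1., 2.],
       [1., 2.]])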
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/groupby.py
ADDED
The diff for this file is too large to render.
See raw diff
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/grouper.py
ADDED
@@ -0,0 +1,1102 @@
+"""
+Provide user facing operators for doing the split part of the
+split-apply-combine paradigm.
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    final,
+)
+import warnings
+
+import numpy as np
+
+from pandas._config import (
+    using_copy_on_write,
+    warn_copy_on_write,
+)
+
+from pandas._libs import lib
+from pandas._libs.tslibs import OutOfBoundsDatetime
+from pandas.errors import InvalidIndexError
+from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import (
+    is_list_like,
+    is_scalar,
+)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+from pandas.core import algorithms
+from pandas.core.arrays import (
+    Categorical,
+    ExtensionArray,
+)
+import pandas.core.common as com
+from pandas.core.frame import DataFrame
+from pandas.core.groupby import ops
+from pandas.core.groupby.categorical import recode_for_groupby
+from pandas.core.indexes.api import (
+    CategoricalIndex,
+    Index,
+    MultiIndex,
+)
+from pandas.core.series import Series
+
+from pandas.io.formats.printing import pprint_thing
+
+if TYPE_CHECKING:
+    from collections.abc import (
+        Hashable,
+        Iterator,
+    )
+
+    from pandas._typing import (
+        ArrayLike,
+        Axis,
+        NDFrameT,
+        npt,
+    )
+
+    from pandas.core.generic import NDFrame
+
+
+class Grouper:
+    """
+    A Grouper allows the user to specify a groupby instruction for an object.
+
+    This specification will select a column via the key parameter, or if the
+    level and/or axis parameters are given, a level of the index of the target
+    object.
+
+    If `axis` and/or `level` are passed as keywords to both `Grouper` and
+    `groupby`, the values passed to `Grouper` take precedence.
+
+    Parameters
+    ----------
+    key : str, defaults to None
+        Groupby key, which selects the grouping column of the target.
+    level : name/number, defaults to None
+        The level for the target index.
+    freq : str / frequency object, defaults to None
+        This will groupby the specified frequency if the target selection
+        (via key or level) is a datetime-like object. For full specification
+        of available frequencies, please see `here
+        <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
+    axis : str, int, defaults to 0
+        Number/name of the axis.
+    sort : bool, default False
+        Whether to sort the resulting labels.
+    closed : {'left' or 'right'}
+        Closed end of interval. Only when `freq` parameter is passed.
+    label : {'left' or 'right'}
+        Interval boundary to use for labeling.
+        Only when `freq` parameter is passed.
+    convention : {'start', 'end', 'e', 's'}
+        If grouper is PeriodIndex and `freq` parameter is passed.
+
+    origin : Timestamp or str, default 'start_day'
+        The timestamp on which to adjust the grouping. The timezone of origin must
+        match the timezone of the index.
+        If string, must be one of the following:
+
+        - 'epoch': `origin` is 1970-01-01
+        - 'start': `origin` is the first value of the timeseries
+        - 'start_day': `origin` is the first day at midnight of the timeseries
+
+        - 'end': `origin` is the last value of the timeseries
+        - 'end_day': `origin` is the ceiling midnight of the last day
+
+        .. versionadded:: 1.3.0
+
+    offset : Timedelta or str, default is None
+        An offset timedelta added to the origin.
+
+    dropna : bool, default True
+        If True, and if group keys contain NA values, NA values together with
+        row/column will be dropped. If False, NA values will also be treated as
+        the key in groups.
+
+    Returns
+    -------
+    Grouper or pandas.api.typing.TimeGrouper
+        A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper
+        is returned.
+
+    Examples
+    --------
+    ``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')``
+
+    >>> df = pd.DataFrame(
+    ...     {
+    ...         "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
+    ...         "Speed": [100, 5, 200, 300, 15],
+    ...     }
+    ... )
+    >>> df
+       Animal  Speed
+    0  Falcon    100
+    1  Parrot      5
+    2  Falcon    200
+    3  Falcon    300
+    4  Parrot     15
+    >>> df.groupby(pd.Grouper(key="Animal")).mean()
+            Speed
+    Animal
+    Falcon  200.0
+    Parrot   10.0
+
+    Specify a resample operation on the column 'Publish date'
+
+    >>> df = pd.DataFrame(
+    ...     {
+    ...         "Publish date": [
+    ...             pd.Timestamp("2000-01-02"),
+    ...             pd.Timestamp("2000-01-02"),
+    ...             pd.Timestamp("2000-01-09"),
+    ...             pd.Timestamp("2000-01-16")
+    ...         ],
+    ...         "ID": [0, 1, 2, 3],
+    ...         "Price": [10, 20, 30, 40]
+    ...     }
+    ... )
+    >>> df
+      Publish date  ID  Price
+    0   2000-01-02   0     10
+    1   2000-01-02   1     20
+    2   2000-01-09   2     30
+    3   2000-01-16   3     40
+    >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
+                   ID  Price
+    Publish date
+    2000-01-02    0.5   15.0
+    2000-01-09    2.0   30.0
+    2000-01-16    3.0   40.0
+
+    If you want to adjust the start of the bins based on a fixed timestamp:
+
+    >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
+    >>> rng = pd.date_range(start, end, freq='7min')
+    >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
+    >>> ts
+    2000-10-01 23:30:00     0
+    2000-10-01 23:37:00     3
+    2000-10-01 23:44:00     6
+    2000-10-01 23:51:00     9
+    2000-10-01 23:58:00    12
+    2000-10-02 00:05:00    15
+    2000-10-02 00:12:00    18
+    2000-10-02 00:19:00    21
+    2000-10-02 00:26:00    24
+    Freq: 7min, dtype: int64
+
+    >>> ts.groupby(pd.Grouper(freq='17min')).sum()
+    2000-10-01 23:14:00     0
+    2000-10-01 23:31:00     9
+    2000-10-01 23:48:00    21
+    2000-10-02 00:05:00    54
+    2000-10-02 00:22:00    24
+    Freq: 17min, dtype: int64
+
+    >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
+    2000-10-01 23:18:00     0
+    2000-10-01 23:35:00    18
+    2000-10-01 23:52:00    27
+    2000-10-02 00:09:00    39
+    2000-10-02 00:26:00    24
+    Freq: 17min, dtype: int64
+
+    >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
+    2000-10-01 23:24:00     3
+    2000-10-01 23:41:00    15
+    2000-10-01 23:58:00    45
+    2000-10-02 00:15:00    45
+    Freq: 17min, dtype: int64
+
+    If you want to adjust the start of the bins with an `offset` Timedelta, the two
+    following lines are equivalent:
+
+    >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
+    2000-10-01 23:30:00     9
+    2000-10-01 23:47:00    21
+    2000-10-02 00:04:00    54
+    2000-10-02 00:21:00    24
+    Freq: 17min, dtype: int64
+
+    >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
+    2000-10-01 23:30:00     9
+    2000-10-01 23:47:00    21
+    2000-10-02 00:04:00    54
+    2000-10-02 00:21:00    24
+    Freq: 17min, dtype: int64
+
+    To replace the use of the deprecated `base` argument, you can now use `offset`,
+    in this example it is equivalent to have `base=2`:
+
+    >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
+    2000-10-01 23:16:00     0
+    2000-10-01 23:33:00     9
+    2000-10-01 23:50:00    36
+    2000-10-02 00:07:00    39
+    2000-10-02 00:24:00    24
+    Freq: 17min, dtype: int64
+    """
+
+    sort: bool
+    dropna: bool
+    _gpr_index: Index | None
+    _grouper: Index | None
+
+    _attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort", "dropna")
+
+    def __new__(cls, *args, **kwargs):
+        if kwargs.get("freq") is not None:
+            from pandas.core.resample import TimeGrouper
+
+            cls = TimeGrouper
+        return super().__new__(cls)
+
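`__new__` is what realizes the docstring's return contract: supplying `freq` swaps in the resample machinery before `__init__` runs. A quick interactive check:

>>> import pandas as pd
>>> type(pd.Grouper(key="Animal")).__name__
'Grouper'
>>> type(pd.Grouper(freq="1D")).__name__
'TimeGrouper'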
+    def __init__(
+        self,
+        key=None,
+        level=None,
+        freq=None,
+        axis: Axis | lib.NoDefault = lib.no_default,
+        sort: bool = False,
+        dropna: bool = True,
+    ) -> None:
+        if type(self) is Grouper:
+            # i.e. not TimeGrouper
+            if axis is not lib.no_default:
+                warnings.warn(
+                    "Grouper axis keyword is deprecated and will be removed in a "
+                    "future version. To group on axis=1, use obj.T.groupby(...) "
+                    "instead",
+                    FutureWarning,
+                    stacklevel=find_stack_level(),
+                )
+            else:
+                axis = 0
+        if axis is lib.no_default:
+            axis = 0
+
+        self.key = key
+        self.level = level
+        self.freq = freq
+        self.axis = axis
+        self.sort = sort
+        self.dropna = dropna
+
+        self._grouper_deprecated = None
+        self._indexer_deprecated: npt.NDArray[np.intp] | None = None
+        self._obj_deprecated = None
+        self._gpr_index = None
+        self.binner = None
+        self._grouper = None
+        self._indexer: npt.NDArray[np.intp] | None = None
+
+    def _get_grouper(
+        self, obj: NDFrameT, validate: bool = True
+    ) -> tuple[ops.BaseGrouper, NDFrameT]:
+        """
+        Parameters
+        ----------
+        obj : Series or DataFrame
+        validate : bool, default True
+            if True, validate the grouper
+
+        Returns
+        -------
+        a tuple of grouper, obj (possibly sorted)
+        """
+        obj, _, _ = self._set_grouper(obj)
+        grouper, _, obj = get_grouper(
+            obj,
+            [self.key],
+            axis=self.axis,
+            level=self.level,
+            sort=self.sort,
+            validate=validate,
+            dropna=self.dropna,
+        )
+        # Without setting this, subsequent lookups to .groups raise
+        # error: Incompatible types in assignment (expression has type "BaseGrouper",
+        # variable has type "None")
+        self._grouper_deprecated = grouper  # type: ignore[assignment]
+
+        return grouper, obj
+
+    def _set_grouper(
+        self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None
+    ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:
+        """
+        given an object and the specifications, setup the internal grouper
+        for this particular specification
+
+        Parameters
+        ----------
+        obj : Series or DataFrame
+        sort : bool, default False
+            whether the resulting grouper should be sorted
+        gpr_index : Index or None, default None
+
+        Returns
+        -------
+        NDFrame
+        Index
+        np.ndarray[np.intp] | None
+        """
+        assert obj is not None
+
+        if self.key is not None and self.level is not None:
+            raise ValueError("The Grouper cannot specify both a key and a level!")
+
+        # Keep self._grouper value before overriding
+        if self._grouper is None:
+            # TODO: What are we assuming about subsequent calls?
+            self._grouper = gpr_index
+            self._indexer = self._indexer_deprecated
+
+        # the key must be a valid info item
+        if self.key is not None:
+            key = self.key
+            # The 'on' is already defined
+            if getattr(gpr_index, "name", None) == key and isinstance(obj, Series):
+                # Sometimes self._grouper will have been resorted while
+                # obj has not. In this case there is a mismatch when we
+                # call self._grouper.take(obj.index) so we need to undo the sorting
+                # before we call _grouper.take.
+                assert self._grouper is not None
+                if self._indexer is not None:
+                    reverse_indexer = self._indexer.argsort()
+                    unsorted_ax = self._grouper.take(reverse_indexer)
+                    ax = unsorted_ax.take(obj.index)
+                else:
+                    ax = self._grouper.take(obj.index)
+            else:
+                if key not in obj._info_axis:
+                    raise KeyError(f"The grouper name {key} is not found")
+                ax = Index(obj[key], name=key)
+
+        else:
+            ax = obj._get_axis(self.axis)
+            if self.level is not None:
+                level = self.level
+
+                # if a level is given it must be a mi level or
+                # equivalent to the axis name
+                if isinstance(ax, MultiIndex):
+                    level = ax._get_level_number(level)
+                    ax = Index(ax._get_level_values(level), name=ax.names[level])
+
+                else:
+                    if level not in (0, ax.name):
+                        raise ValueError(f"The level {level} is not valid")
+
+        # possibly sort
+        indexer: npt.NDArray[np.intp] | None = None
+        if (self.sort or sort) and not ax.is_monotonic_increasing:
+            # use stable sort to support first, last, nth
+            # TODO: why does putting na_position="first" fix datetimelike cases?
+            indexer = self._indexer_deprecated = ax.array.argsort(
+                kind="mergesort", na_position="first"
+            )
+            ax = ax.take(indexer)
+            obj = obj.take(indexer, axis=self.axis)
+
+        # error: Incompatible types in assignment (expression has type
+        # "NDFrameT", variable has type "None")
+        self._obj_deprecated = obj  # type: ignore[assignment]
+        self._gpr_index = ax
+        return obj, ax, indexer
+
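The first validation in `_set_grouper` is what users hit when a Grouper names both a column and an index level; a sketch with a hypothetical frame:

>>> df = pd.DataFrame({"x": [1, 2]}, index=pd.Index(["a", "b"], name="idx"))
>>> df.groupby(pd.Grouper(key="x", level="idx")).sum()
Traceback (most recent call last):
    ...
ValueError: The Grouper cannot specify both a key and a level!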
+    @final
+    @property
+    def ax(self) -> Index:
+        warnings.warn(
+            f"{type(self).__name__}.ax is deprecated and will be removed in a "
+            "future version. Use Resampler.ax instead",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        index = self._gpr_index
+        if index is None:
+            raise ValueError("_set_grouper must be called before ax is accessed")
+        return index
+
+    @final
+    @property
+    def indexer(self):
+        warnings.warn(
+            f"{type(self).__name__}.indexer is deprecated and will be removed "
+            "in a future version. Use Resampler.indexer instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self._indexer_deprecated
+
+    @final
+    @property
+    def obj(self):
+        # TODO(3.0): enforcing these deprecations on Grouper should close
+        # GH#25564, GH#41930
+        warnings.warn(
+            f"{type(self).__name__}.obj is deprecated and will be removed "
+            "in a future version. Use GroupBy.indexer instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self._obj_deprecated
+
+    @final
+    @property
+    def grouper(self):
+        warnings.warn(
+            f"{type(self).__name__}.grouper is deprecated and will be removed "
+            "in a future version. Use GroupBy.grouper instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self._grouper_deprecated
+
+    @final
+    @property
+    def groups(self):
+        warnings.warn(
+            f"{type(self).__name__}.groups is deprecated and will be removed "
+            "in a future version. Use GroupBy.groups instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        # error: "None" has no attribute "groups"
+        return self._grouper_deprecated.groups  # type: ignore[attr-defined]
+
+    @final
+    def __repr__(self) -> str:
+        attrs_list = (
+            f"{attr_name}={repr(getattr(self, attr_name))}"
+            for attr_name in self._attributes
+            if getattr(self, attr_name) is not None
+        )
+        attrs = ", ".join(attrs_list)
+        cls_name = type(self).__name__
+        return f"{cls_name}({attrs})"
+
+
+@final
+class Grouping:
+    """
+    Holds the grouping information for a single key
+
+    Parameters
+    ----------
+    index : Index
+    grouper :
+    obj : DataFrame or Series
+    name : Label
+    level :
+    observed : bool, default False
+        If we are a Categorical, use the observed values
+    in_axis : if the Grouping is a column in self.obj and hence among
+        Groupby.exclusions list
+    dropna : bool, default True
+        Whether to drop NA groups.
+    uniques : Array-like, optional
+        When specified, will be used for unique values. Enables including empty groups
+        in the result for a BinGrouper. Must not contain duplicates.
+
+    Attributes
+    ----------
+    indices : dict
+        Mapping of {group -> index_list}
+    codes : ndarray
+        Group codes
+    group_index : Index or None
+        unique groups
+    groups : dict
+        Mapping of {group -> label_list}
+    """
+
+    _codes: npt.NDArray[np.signedinteger] | None = None
+    _all_grouper: Categorical | None
+    _orig_cats: Index | None
+    _index: Index
+
+    def __init__(
+        self,
+        index: Index,
+        grouper=None,
+        obj: NDFrame | None = None,
+        level=None,
+        sort: bool = True,
+        observed: bool = False,
+        in_axis: bool = False,
+        dropna: bool = True,
+        uniques: ArrayLike | None = None,
+    ) -> None:
+        self.level = level
+        self._orig_grouper = grouper
+        grouping_vector = _convert_grouper(index, grouper)
+        self._all_grouper = None
+        self._orig_cats = None
+        self._index = index
+        self._sort = sort
+        self.obj = obj
+        self._observed = observed
+        self.in_axis = in_axis
+        self._dropna = dropna
+        self._uniques = uniques
+
+        # we have a single grouper which may be a myriad of things,
+        # some of which are dependent on the passing in level
+
+        ilevel = self._ilevel
+        if ilevel is not None:
+            # In extant tests, the new self.grouping_vector matches
+            # `index.get_level_values(ilevel)` whenever
+            # mapper is None and isinstance(index, MultiIndex)
+            if isinstance(index, MultiIndex):
+                index_level = index.get_level_values(ilevel)
+            else:
+                index_level = index
+
+            if grouping_vector is None:
+                grouping_vector = index_level
+            else:
+                mapper = grouping_vector
+                grouping_vector = index_level.map(mapper)
+
+        # a passed Grouper like, directly get the grouper in the same way
+        # as single grouper groupby, use the group_info to get codes
+        elif isinstance(grouping_vector, Grouper):
+            # get the new grouper; we already have disambiguated
+            # what key/level refer to exactly, don't need to
+            # check again as we have by this point converted these
+            # to an actual value (rather than a pd.Grouper)
+            assert self.obj is not None  # for mypy
+            newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False)
+            self.obj = newobj
+
+            if isinstance(newgrouper, ops.BinGrouper):
+                # TODO: can we unwrap this and get a tighter typing
+                #  for self.grouping_vector?
+                grouping_vector = newgrouper
+            else:
+                # ops.BaseGrouper
+                # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1.
+                #  If that were to occur, would we be throwing out information?
+                # error: Cannot determine type of "grouping_vector"  [has-type]
+                ng = newgrouper.groupings[0].grouping_vector  # type: ignore[has-type]
+                # use Index instead of ndarray so we can recover the name
+                grouping_vector = Index(ng, name=newgrouper.result_index.name)
+
+        elif not isinstance(
+            grouping_vector, (Series, Index, ExtensionArray, np.ndarray)
+        ):
+            # no level passed
+            if getattr(grouping_vector, "ndim", 1) != 1:
+                t = str(type(grouping_vector))
+                raise ValueError(f"Grouper for '{t}' not 1-dimensional")
+
+            grouping_vector = index.map(grouping_vector)
+
+            if not (
+                hasattr(grouping_vector, "__len__")
+                and len(grouping_vector) == len(index)
+            ):
+                grper = pprint_thing(grouping_vector)
+                errmsg = (
+                    "Grouper result violates len(labels) == "
+                    f"len(data)\nresult: {grper}"
+                )
+                raise AssertionError(errmsg)
+
+        if isinstance(grouping_vector, np.ndarray):
+            if grouping_vector.dtype.kind in "mM":
+                # if we have a date/time-like grouper, make sure that we have
+                # Timestamps like
+                # TODO 2022-10-08 we only have one test that gets here and
+                #  values are already in nanoseconds in that case.
+                grouping_vector = Series(grouping_vector).to_numpy()
+        elif isinstance(getattr(grouping_vector, "dtype", None), CategoricalDtype):
+            # a passed Categorical
+            self._orig_cats = grouping_vector.categories
+            grouping_vector, self._all_grouper = recode_for_groupby(
+                grouping_vector, sort, observed
+            )
+
+        self.grouping_vector = grouping_vector
+
+    def __repr__(self) -> str:
+        return f"Grouping({self.name})"
+
+    def __iter__(self) -> Iterator:
+        return iter(self.indices)
+
+    @cache_readonly
+    def _passed_categorical(self) -> bool:
+        dtype = getattr(self.grouping_vector, "dtype", None)
+        return isinstance(dtype, CategoricalDtype)
+
+    @cache_readonly
+    def name(self) -> Hashable:
+        ilevel = self._ilevel
+        if ilevel is not None:
+            return self._index.names[ilevel]
+
+        if isinstance(self._orig_grouper, (Index, Series)):
+            return self._orig_grouper.name
+
+        elif isinstance(self.grouping_vector, ops.BaseGrouper):
+            return self.grouping_vector.result_index.name
+
+        elif isinstance(self.grouping_vector, Index):
+            return self.grouping_vector.name
+
+        # otherwise we have ndarray or ExtensionArray -> no name
+        return None
+
+    @cache_readonly
+    def _ilevel(self) -> int | None:
+        """
+        If necessary, convert index level name to index level position.
+        """
+        level = self.level
+        if level is None:
+            return None
+        if not isinstance(level, int):
+            index = self._index
+            if level not in index.names:
+                raise AssertionError(f"Level {level} not in index")
+            return index.names.index(level)
+        return level
+
+    @property
+    def ngroups(self) -> int:
+        return len(self._group_index)
+
+    @cache_readonly
+    def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
+        # we have a list of groupers
+        if isinstance(self.grouping_vector, ops.BaseGrouper):
+            return self.grouping_vector.indices
+
+        values = Categorical(self.grouping_vector)
+        return values._reverse_indexer()
+
+    @property
+    def codes(self) -> npt.NDArray[np.signedinteger]:
+        return self._codes_and_uniques[0]
+
+    @cache_readonly
+    def _group_arraylike(self) -> ArrayLike:
+        """
+        Analogous to result_index, but holding an ArrayLike to ensure
+        we can retain ExtensionDtypes.
+        """
+        if self._all_grouper is not None:
+            # retain dtype for categories, including unobserved ones
+            return self._result_index._values
+
+        elif self._passed_categorical:
+            return self._group_index._values
+
+        return self._codes_and_uniques[1]
+
+    @property
+    def group_arraylike(self) -> ArrayLike:
+        """
+        Analogous to result_index, but holding an ArrayLike to ensure
+        we can retain ExtensionDtypes.
+        """
+        warnings.warn(
+            "group_arraylike is deprecated and will be removed in a future "
+            "version of pandas",
+            category=FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self._group_arraylike
+
+    @cache_readonly
+    def _result_index(self) -> Index:
+        # result_index retains dtype for categories, including unobserved ones,
+        # which group_index does not
+        if self._all_grouper is not None:
+            group_idx = self._group_index
+            assert isinstance(group_idx, CategoricalIndex)
+            cats = self._orig_cats
+            # set_categories is dynamically added
+            return group_idx.set_categories(cats)  # type: ignore[attr-defined]
+        return self._group_index
+
+    @property
+    def result_index(self) -> Index:
+        warnings.warn(
+            "result_index is deprecated and will be removed in a future "
+            "version of pandas",
+            category=FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self._result_index
+
+    @cache_readonly
+    def _group_index(self) -> Index:
+        codes, uniques = self._codes_and_uniques
+        if not self._dropna and self._passed_categorical:
+            assert isinstance(uniques, Categorical)
+            if self._sort and (codes == len(uniques)).any():
+                # Add NA value on the end when sorting
+                uniques = Categorical.from_codes(
+                    np.append(uniques.codes, [-1]), uniques.categories, validate=False
+                )
+            elif len(codes) > 0:
+                # Need to determine proper placement of NA value when not sorting
+                cat = self.grouping_vector
+                na_idx = (cat.codes < 0).argmax()
+                if cat.codes[na_idx] < 0:
+                    # count number of unique codes that comes before the nan value
+                    na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx])
+                    new_codes = np.insert(uniques.codes, na_unique_idx, -1)
+                    uniques = Categorical.from_codes(
+                        new_codes, uniques.categories, validate=False
+                    )
+        return Index._with_infer(uniques, name=self.name)
+
+    @property
+    def group_index(self) -> Index:
+        warnings.warn(
+            "group_index is deprecated and will be removed in a future "
+            "version of pandas",
+            category=FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self._group_index
+
+    @cache_readonly
+    def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]:
+        uniques: ArrayLike
+        if self._passed_categorical:
+            # we make a CategoricalIndex out of the cat grouper
+            # preserving the categories / ordered attributes;
+            # doesn't (yet - GH#46909) handle dropna=False
+            cat = self.grouping_vector
+            categories = cat.categories
+
+            if self._observed:
+                ucodes = algorithms.unique1d(cat.codes)
+                ucodes = ucodes[ucodes != -1]
+                if self._sort:
+                    ucodes = np.sort(ucodes)
+            else:
+                ucodes = np.arange(len(categories))
+
+            uniques = Categorical.from_codes(
+                codes=ucodes, categories=categories, ordered=cat.ordered, validate=False
+            )
+
+            codes = cat.codes
+            if not self._dropna:
+                na_mask = codes < 0
+                if np.any(na_mask):
+                    if self._sort:
+                        # Replace NA codes with `largest code + 1`
+                        na_code = len(categories)
+                        codes = np.where(na_mask, na_code, codes)
+                    else:
+                        # Insert NA code into the codes based on first appearance
+                        # A negative code must exist, no need to check codes[na_idx] < 0
+                        na_idx = na_mask.argmax()
+                        # count number of unique codes that comes before the nan value
+                        na_code = algorithms.nunique_ints(codes[:na_idx])
+                        codes = np.where(codes >= na_code, codes + 1, codes)
+                        codes = np.where(na_mask, na_code, codes)
+
+            if not self._observed:
+                uniques = uniques.reorder_categories(self._orig_cats)
+
+            return codes, uniques
+
+        elif isinstance(self.grouping_vector, ops.BaseGrouper):
+            # we have a list of groupers
+            codes = self.grouping_vector.codes_info
+            uniques = self.grouping_vector.result_index._values
+        elif self._uniques is not None:
+            # GH#50486 Code grouping_vector using _uniques; allows
+            # including uniques that are not present in grouping_vector.
+            cat = Categorical(self.grouping_vector, categories=self._uniques)
+            codes = cat.codes
+            uniques = self._uniques
+        else:
+            # GH35667, replace dropna=False with use_na_sentinel=False
+            # error: Incompatible types in assignment (expression has type "Union[
+            # ndarray[Any, Any], Index]", variable has type "Categorical")
+            codes, uniques = algorithms.factorize(  # type: ignore[assignment]
+                self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna
+            )
+        return codes, uniques
+
+    @cache_readonly
+    def groups(self) -> dict[Hashable, np.ndarray]:
+        cats = Categorical.from_codes(self.codes, self._group_index, validate=False)
+        return self._index.groupby(cats)
+
+
846 |
+
def get_grouper(
|
847 |
+
obj: NDFrameT,
|
848 |
+
key=None,
|
849 |
+
axis: Axis = 0,
|
850 |
+
level=None,
|
851 |
+
sort: bool = True,
|
852 |
+
observed: bool = False,
|
853 |
+
validate: bool = True,
|
854 |
+
dropna: bool = True,
|
855 |
+
) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]:
|
856 |
+
"""
|
857 |
+
Create and return a BaseGrouper, which is an internal
|
858 |
+
mapping of how to create the grouper indexers.
|
859 |
+
This may be composed of multiple Grouping objects, indicating
|
860 |
+
multiple groupers
|
861 |
+
|
862 |
+
Groupers are ultimately index mappings. They can originate as:
|
863 |
+
index mappings, keys to columns, functions, or Groupers
|
864 |
+
|
865 |
+
Groupers enable local references to axis,level,sort, while
|
866 |
+
the passed in axis, level, and sort are 'global'.
|
867 |
+
|
868 |
+
This routine tries to figure out what the passing in references
|
869 |
+
are and then creates a Grouping for each one, combined into
|
870 |
+
a BaseGrouper.
|
871 |
+
|
872 |
+
If observed & we have a categorical grouper, only show the observed
|
873 |
+
values.
|
874 |
+
|
875 |
+
If validate, then check for key/level overlaps.
|
876 |
+
|
877 |
+
"""
|
878 |
+
group_axis = obj._get_axis(axis)
|
879 |
+
|
880 |
+
# validate that the passed single level is compatible with the passed
|
881 |
+
# axis of the object
|
882 |
+
if level is not None:
|
883 |
+
# TODO: These if-block and else-block are almost same.
|
884 |
+
# MultiIndex instance check is removable, but it seems that there are
|
885 |
+
# some processes only for non-MultiIndex in else-block,
|
886 |
+
# eg. `obj.index.name != level`. We have to consider carefully whether
|
887 |
+
# these are applicable for MultiIndex. Even if these are applicable,
|
888 |
+
# we need to check if it makes no side effect to subsequent processes
|
889 |
+
# on the outside of this condition.
|
890 |
+
# (GH 17621)
|
891 |
+
if isinstance(group_axis, MultiIndex):
|
892 |
+
if is_list_like(level) and len(level) == 1:
|
893 |
+
level = level[0]
|
894 |
+
|
895 |
+
if key is None and is_scalar(level):
|
896 |
+
# Get the level values from group_axis
|
897 |
+
key = group_axis.get_level_values(level)
|
898 |
+
level = None
|
899 |
+
|
900 |
+
else:
|
901 |
+
# allow level to be a length-one list-like object
|
902 |
+
# (e.g., level=[0])
|
903 |
+
# GH 13901
|
904 |
+
if is_list_like(level):
|
905 |
+
nlevels = len(level)
|
906 |
+
if nlevels == 1:
|
907 |
+
level = level[0]
|
908 |
+
elif nlevels == 0:
|
909 |
+
raise ValueError("No group keys passed!")
|
910 |
+
else:
|
911 |
+
raise ValueError("multiple levels only valid with MultiIndex")
|
912 |
+
|
913 |
+
if isinstance(level, str):
|
914 |
+
if obj._get_axis(axis).name != level:
|
915 |
+
raise ValueError(
|
916 |
+
f"level name {level} is not the name "
|
917 |
+
f"of the {obj._get_axis_name(axis)}"
|
918 |
+
)
|
919 |
+
elif level > 0 or level < -1:
|
920 |
+
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
|
921 |
+
|
922 |
+
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
|
923 |
+
# are same in this section.
|
924 |
+
level = None
|
925 |
+
key = group_axis
|
926 |
+
|
927 |
+
# a passed-in Grouper, directly convert
|
928 |
+
if isinstance(key, Grouper):
|
929 |
+
grouper, obj = key._get_grouper(obj, validate=False)
|
930 |
+
if key.key is None:
|
931 |
+
return grouper, frozenset(), obj
|
932 |
+
else:
|
933 |
+
return grouper, frozenset({key.key}), obj
|
934 |
+
|
935 |
+
# already have a BaseGrouper, just return it
|
936 |
+
elif isinstance(key, ops.BaseGrouper):
|
937 |
+
return key, frozenset(), obj
|
938 |
+
|
939 |
+
if not isinstance(key, list):
|
940 |
+
keys = [key]
|
941 |
+
match_axis_length = False
|
942 |
+
else:
|
943 |
+
keys = key
|
944 |
+
match_axis_length = len(keys) == len(group_axis)
|
945 |
+
|
946 |
+
# what are we after, exactly?
|
947 |
+
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
|
948 |
+
any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys)
|
949 |
+
any_arraylike = any(
|
950 |
+
isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
|
951 |
+
)
|
952 |
+
|
953 |
+
# is this an index replacement?
|
954 |
+
if (
|
955 |
+
not any_callable
|
956 |
+
and not any_arraylike
|
957 |
+
and not any_groupers
|
958 |
+
and match_axis_length
|
959 |
+
and level is None
|
960 |
+
):
|
961 |
+
if isinstance(obj, DataFrame):
|
962 |
+
all_in_columns_index = all(
|
963 |
+
g in obj.columns or g in obj.index.names for g in keys
|
964 |
+
)
|
965 |
+
else:
|
966 |
+
assert isinstance(obj, Series)
|
967 |
+
all_in_columns_index = all(g in obj.index.names for g in keys)
|
968 |
+
|
969 |
+
if not all_in_columns_index:
|
970 |
+
keys = [com.asarray_tuplesafe(keys)]
|
971 |
+
|
972 |
+
if isinstance(level, (tuple, list)):
|
973 |
+
if key is None:
|
974 |
+
keys = [None] * len(level)
|
975 |
+
levels = level
|
976 |
+
else:
|
977 |
+
levels = [level] * len(keys)
|
978 |
+
|
979 |
+
groupings: list[Grouping] = []
|
980 |
+
exclusions: set[Hashable] = set()
|
981 |
+
|
982 |
+
# if the actual grouper should be obj[key]
|
983 |
+
def is_in_axis(key) -> bool:
|
984 |
+
if not _is_label_like(key):
|
985 |
+
if obj.ndim == 1:
|
986 |
+
return False
|
987 |
+
|
988 |
+
# items -> .columns for DataFrame, .index for Series
|
989 |
+
items = obj.axes[-1]
|
990 |
+
try:
|
991 |
+
items.get_loc(key)
|
992 |
+
except (KeyError, TypeError, InvalidIndexError):
|
993 |
+
# TypeError shows up here if we pass e.g. an Index
|
994 |
+
return False
|
995 |
+
|
996 |
+
return True
|
997 |
+
|
998 |
+
# if the grouper is obj[name]
|
999 |
+
def is_in_obj(gpr) -> bool:
|
1000 |
+
if not hasattr(gpr, "name"):
|
1001 |
+
return False
|
1002 |
+
if using_copy_on_write() or warn_copy_on_write():
|
1003 |
+
# For the CoW case, we check the references to determine if the
|
1004 |
+
# series is part of the object
|
1005 |
+
try:
|
1006 |
+
obj_gpr_column = obj[gpr.name]
|
1007 |
+
except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):
|
1008 |
+
return False
|
1009 |
+
if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series):
|
1010 |
+
return gpr._mgr.references_same_values( # type: ignore[union-attr]
|
1011 |
+
obj_gpr_column._mgr, 0 # type: ignore[arg-type]
|
1012 |
+
)
|
1013 |
+
return False
|
1014 |
+
try:
|
1015 |
+
return gpr is obj[gpr.name]
|
1016 |
+
except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):
|
1017 |
+
# IndexError reached in e.g. test_skip_group_keys when we pass
|
1018 |
+
# lambda here
|
1019 |
+
# InvalidIndexError raised on key-types inappropriate for index,
|
1020 |
+
# e.g. DatetimeIndex.get_loc(tuple())
|
1021 |
+
# OutOfBoundsDatetime raised when obj is a Series with DatetimeIndex
|
1022 |
+
# and gpr.name is month str
|
1023 |
+
return False
|
1024 |
+
|
1025 |
+
for gpr, level in zip(keys, levels):
|
1026 |
+
if is_in_obj(gpr): # df.groupby(df['name'])
|
1027 |
+
in_axis = True
|
1028 |
+
exclusions.add(gpr.name)
|
1029 |
+
|
1030 |
+
elif is_in_axis(gpr): # df.groupby('name')
|
1031 |
+
if obj.ndim != 1 and gpr in obj:
|
1032 |
+
if validate:
|
1033 |
+
obj._check_label_or_level_ambiguity(gpr, axis=axis)
|
1034 |
+
in_axis, name, gpr = True, gpr, obj[gpr]
|
1035 |
+
if gpr.ndim != 1:
|
1036 |
+
# non-unique columns; raise here to get the name in the
|
1037 |
+
# exception message
|
1038 |
+
raise ValueError(f"Grouper for '{name}' not 1-dimensional")
|
1039 |
+
exclusions.add(name)
|
1040 |
+
elif obj._is_level_reference(gpr, axis=axis):
|
1041 |
+
in_axis, level, gpr = False, gpr, None
|
1042 |
+
else:
|
1043 |
+
raise KeyError(gpr)
|
1044 |
+
elif isinstance(gpr, Grouper) and gpr.key is not None:
|
1045 |
+
# Add key to exclusions
|
1046 |
+
exclusions.add(gpr.key)
|
1047 |
+
in_axis = True
|
1048 |
+
else:
|
1049 |
+
in_axis = False
|
1050 |
+
|
1051 |
+
# create the Grouping
|
1052 |
+
# allow us to passing the actual Grouping as the gpr
|
1053 |
+
ping = (
|
1054 |
+
Grouping(
|
1055 |
+
group_axis,
|
1056 |
+
gpr,
|
1057 |
+
obj=obj,
|
1058 |
+
level=level,
|
1059 |
+
sort=sort,
|
1060 |
+
observed=observed,
|
1061 |
+
in_axis=in_axis,
|
1062 |
+
dropna=dropna,
|
1063 |
+
)
|
1064 |
+
if not isinstance(gpr, Grouping)
|
1065 |
+
else gpr
|
1066 |
+
)
|
1067 |
+
|
1068 |
+
groupings.append(ping)
|
1069 |
+
|
1070 |
+
if len(groupings) == 0 and len(obj):
|
1071 |
+
raise ValueError("No group keys passed!")
|
1072 |
+
if len(groupings) == 0:
|
1073 |
+
groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))
|
1074 |
+
|
1075 |
+
# create the internals grouper
|
1076 |
+
grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna)
|
1077 |
+
return grouper, frozenset(exclusions), obj
|
1078 |
+
|
1079 |
+
|
1080 |
+
def _is_label_like(val) -> bool:
|
1081 |
+
return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))
|
1082 |
+
|
1083 |
+
|
1084 |
+
def _convert_grouper(axis: Index, grouper):
|
1085 |
+
if isinstance(grouper, dict):
|
1086 |
+
return grouper.get
|
1087 |
+
elif isinstance(grouper, Series):
|
1088 |
+
if grouper.index.equals(axis):
|
1089 |
+
return grouper._values
|
1090 |
+
else:
|
1091 |
+
return grouper.reindex(axis)._values
|
1092 |
+
elif isinstance(grouper, MultiIndex):
|
1093 |
+
return grouper._values
|
1094 |
+
elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)):
|
1095 |
+
if len(grouper) != len(axis):
|
1096 |
+
raise ValueError("Grouper and axis must be same length")
|
1097 |
+
|
1098 |
+
if isinstance(grouper, (list, tuple)):
|
1099 |
+
grouper = com.asarray_tuplesafe(grouper)
|
1100 |
+
return grouper
|
1101 |
+
else:
|
1102 |
+
return grouper
|
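A minimal sketch (not part of the diff) of how the key-resolution paths in get_grouper above are reached from the public API; the frame and column names are illustrative only.

import numpy as np
import pandas as pd

df = pd.DataFrame({"key": ["a", "b", "a", np.nan], "val": [1, 2, 3, 4]})

# A column label resolves via is_in_axis(); the label is added to `exclusions`.
print(df.groupby("key").sum())

# Passing the column Series itself resolves via is_in_obj().
print(df.groupby(df["key"]).sum())

# dropna=False keeps the NaN group, exercising the NA-code handling in
# Grouping._codes_and_uniques above.
print(df.groupby("key", dropna=False).sum())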
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/indexing.py
ADDED
@@ -0,0 +1,304 @@
from __future__ import annotations

from collections.abc import Iterable
from typing import (
    TYPE_CHECKING,
    Literal,
    cast,
)

import numpy as np

from pandas.util._decorators import (
    cache_readonly,
    doc,
)

from pandas.core.dtypes.common import (
    is_integer,
    is_list_like,
)

if TYPE_CHECKING:
    from pandas._typing import PositionalIndexer

    from pandas import (
        DataFrame,
        Series,
    )
    from pandas.core.groupby import groupby


class GroupByIndexingMixin:
    """
    Mixin for adding ._positional_selector to GroupBy.
    """

    @cache_readonly
    def _positional_selector(self) -> GroupByPositionalSelector:
        """
        Return positional selection for each group.

        ``groupby._positional_selector[i:j]`` is similar to
        ``groupby.apply(lambda x: x.iloc[i:j])``
        but much faster and preserves the original index and order.

        ``_positional_selector[]`` is compatible with and extends :meth:`~GroupBy.head`
        and :meth:`~GroupBy.tail`. For example:

        - ``head(5)``
        - ``_positional_selector[5:-5]``
        - ``tail(5)``

        together return all the rows.

        Allowed inputs for the index are:

        - An integer valued iterable, e.g. ``range(2, 4)``.
        - A comma separated list of integers and slices, e.g. ``5``, ``2, 4``, ``2:4``.

        The output format is the same as :meth:`~GroupBy.head` and
        :meth:`~GroupBy.tail`, namely
        a subset of the ``DataFrame`` or ``Series`` with the index and order preserved.

        Returns
        -------
        Series
            The filtered subset of the original Series.
        DataFrame
            The filtered subset of the original DataFrame.

        See Also
        --------
        DataFrame.iloc : Purely integer-location based indexing for selection by
            position.
        GroupBy.head : Return first n rows of each group.
        GroupBy.tail : Return last n rows of each group.
        GroupBy.nth : Take the nth row from each group if n is an int, or a
            subset of rows, if n is a list of ints.

        Notes
        -----
        - The slice step cannot be negative.
        - If the index specification results in overlaps, the item is not duplicated.
        - If the index specification changes the order of items, then
          they are returned in their original order.
          By contrast, ``DataFrame.iloc`` can change the row order.
        - ``groupby()`` parameters such as as_index and dropna are ignored.

        The differences between ``_positional_selector[]`` and :meth:`~GroupBy.nth`
        with ``as_index=False`` are:

        - Input to ``_positional_selector`` can include
          one or more slices whereas ``nth``
          just handles an integer or a list of integers.
        - ``_positional_selector`` can accept a slice relative to the
          last row of each group.
        - ``_positional_selector`` does not have an equivalent to the
          ``nth()`` ``dropna`` parameter.

        Examples
        --------
        >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]],
        ...                   columns=["A", "B"])
        >>> df.groupby("A")._positional_selector[1:2]
           A  B
        1  a  2
        4  b  5

        >>> df.groupby("A")._positional_selector[1, -1]
           A  B
        1  a  2
        2  a  3
        4  b  5
        """
        if TYPE_CHECKING:
            # pylint: disable-next=used-before-assignment
            groupby_self = cast(groupby.GroupBy, self)
        else:
            groupby_self = self

        return GroupByPositionalSelector(groupby_self)

    def _make_mask_from_positional_indexer(
        self,
        arg: PositionalIndexer | tuple,
    ) -> np.ndarray:
        if is_list_like(arg):
            if all(is_integer(i) for i in cast(Iterable, arg)):
                mask = self._make_mask_from_list(cast(Iterable[int], arg))
            else:
                mask = self._make_mask_from_tuple(cast(tuple, arg))

        elif isinstance(arg, slice):
            mask = self._make_mask_from_slice(arg)
        elif is_integer(arg):
            mask = self._make_mask_from_int(cast(int, arg))
        else:
            raise TypeError(
                f"Invalid index {type(arg)}. "
                "Must be integer, list-like, slice or a tuple of "
                "integers and slices"
            )

        if isinstance(mask, bool):
            if mask:
                mask = self._ascending_count >= 0
            else:
                mask = self._ascending_count < 0

        return cast(np.ndarray, mask)

    def _make_mask_from_int(self, arg: int) -> np.ndarray:
        if arg >= 0:
            return self._ascending_count == arg
        else:
            return self._descending_count == (-arg - 1)

    def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray:
        positive = [arg for arg in args if arg >= 0]
        negative = [-arg - 1 for arg in args if arg < 0]

        mask: bool | np.ndarray = False

        if positive:
            mask |= np.isin(self._ascending_count, positive)

        if negative:
            mask |= np.isin(self._descending_count, negative)

        return mask

    def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray:
        mask: bool | np.ndarray = False

        for arg in args:
            if is_integer(arg):
                mask |= self._make_mask_from_int(cast(int, arg))
            elif isinstance(arg, slice):
                mask |= self._make_mask_from_slice(arg)
            else:
                raise ValueError(
                    f"Invalid argument {type(arg)}. Should be int or slice."
                )

        return mask

    def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray:
        start = arg.start
        stop = arg.stop
        step = arg.step

        if step is not None and step < 0:
            raise ValueError(f"Invalid step {step}. Must be non-negative")

        mask: bool | np.ndarray = True

        if step is None:
            step = 1

        if start is None:
            if step > 1:
                mask &= self._ascending_count % step == 0

        elif start >= 0:
            mask &= self._ascending_count >= start

            if step > 1:
                mask &= (self._ascending_count - start) % step == 0

        else:
            mask &= self._descending_count < -start

            offset_array = self._descending_count + start + 1
            limit_array = (
                self._ascending_count + self._descending_count + (start + 1)
            ) < 0
            offset_array = np.where(limit_array, self._ascending_count, offset_array)

            mask &= offset_array % step == 0

        if stop is not None:
            if stop >= 0:
                mask &= self._ascending_count < stop
            else:
                mask &= self._descending_count >= -stop

        return mask

    @cache_readonly
    def _ascending_count(self) -> np.ndarray:
        if TYPE_CHECKING:
            groupby_self = cast(groupby.GroupBy, self)
        else:
            groupby_self = self

        return groupby_self._cumcount_array()

    @cache_readonly
    def _descending_count(self) -> np.ndarray:
        if TYPE_CHECKING:
            groupby_self = cast(groupby.GroupBy, self)
        else:
            groupby_self = self

        return groupby_self._cumcount_array(ascending=False)


@doc(GroupByIndexingMixin._positional_selector)
class GroupByPositionalSelector:
    def __init__(self, groupby_object: groupby.GroupBy) -> None:
        self.groupby_object = groupby_object

    def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:
        """
        Select by positional index per group.

        Implements GroupBy._positional_selector

        Parameters
        ----------
        arg : PositionalIndexer | tuple
            Allowed values are:
            - int
            - int valued iterable such as list or range
            - slice with step either None or positive
            - tuple of integers and slices

        Returns
        -------
        Series
            The filtered subset of the original groupby Series.
        DataFrame
            The filtered subset of the original groupby DataFrame.

        See Also
        --------
        DataFrame.iloc : Integer-location based indexing for selection by position.
        GroupBy.head : Return first n rows of each group.
        GroupBy.tail : Return last n rows of each group.
        GroupBy._positional_selector : Return positional selection for each group.
        GroupBy.nth : Take the nth row from each group if n is an int, or a
            subset of rows, if n is a list of ints.
        """
        mask = self.groupby_object._make_mask_from_positional_indexer(arg)
        return self.groupby_object._mask_selected_obj(mask)


class GroupByNthSelector:
    """
    Dynamically substituted for GroupBy.nth to enable both call and index
    """

    def __init__(self, groupby_object: groupby.GroupBy) -> None:
        self.groupby_object = groupby_object

    def __call__(
        self,
        n: PositionalIndexer | tuple,
        dropna: Literal["any", "all", None] = None,
    ) -> DataFrame | Series:
        return self.groupby_object._nth(n, dropna)

    def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series:
        return self.groupby_object._nth(n)
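A minimal sketch (not part of the diff) of the mask arithmetic behind GroupByIndexingMixin above: a positional selection becomes a boolean mask over per-group ascending/descending cumulative counts, so no group is ever materialized. The frame below is illustrative only.

import pandas as pd

df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], columns=["A", "B"])
gb = df.groupby("A")

asc = gb.cumcount().to_numpy()                  # plays the role of _ascending_count
desc = gb.cumcount(ascending=False).to_numpy()  # plays the role of _descending_count

# Hand-rolled equivalent of _positional_selector[1:2]: keep rows whose
# within-group position pos satisfies 1 <= pos < 2.
mask = (asc >= 1) & (asc < 2)
print(df[mask])

# Equivalent of _positional_selector[1, -1]: position 1 from the front, or
# position 0 from the back (_make_mask_from_int maps -1 to desc == 0).
mask = (asc == 1) | (desc == 0)
print(df[mask])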
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/numba_.py
ADDED
@@ -0,0 +1,181 @@
"""Common utilities for Numba operations with groupby ops"""
from __future__ import annotations

import functools
import inspect
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
)

import numpy as np

from pandas.compat._optional import import_optional_dependency

from pandas.core.util.numba_ import (
    NumbaUtilError,
    jit_user_function,
)

if TYPE_CHECKING:
    from pandas._typing import Scalar


def validate_udf(func: Callable) -> None:
    """
    Validate user defined function for ops when using Numba with groupby ops.

    The first signature arguments should include:

    def f(values, index, ...):
        ...

    Parameters
    ----------
    func : function, default False
        user defined function

    Returns
    -------
    None

    Raises
    ------
    NumbaUtilError
    """
    if not callable(func):
        raise NotImplementedError(
            "Numba engine can only be used with a single function."
        )
    udf_signature = list(inspect.signature(func).parameters.keys())
    expected_args = ["values", "index"]
    min_number_args = len(expected_args)
    if (
        len(udf_signature) < min_number_args
        or udf_signature[:min_number_args] != expected_args
    ):
        raise NumbaUtilError(
            f"The first {min_number_args} arguments to {func.__name__} must be "
            f"{expected_args}"
        )


@functools.cache
def generate_numba_agg_func(
    func: Callable[..., Scalar],
    nopython: bool,
    nogil: bool,
    parallel: bool,
) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:
    """
    Generate a numba jitted agg function specified by values from engine_kwargs.

    1. jit the user's function
    2. Return a groupby agg function with the jitted function inline

    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the groupby evaluation loop.

    Parameters
    ----------
    func : function
        function to be applied to each group and will be JITed
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    numba_func = jit_user_function(func)
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def group_agg(
        values: np.ndarray,
        index: np.ndarray,
        begin: np.ndarray,
        end: np.ndarray,
        num_columns: int,
        *args: Any,
    ) -> np.ndarray:
        assert len(begin) == len(end)
        num_groups = len(begin)

        result = np.empty((num_groups, num_columns))
        for i in numba.prange(num_groups):
            group_index = index[begin[i] : end[i]]
            for j in numba.prange(num_columns):
                group = values[begin[i] : end[i], j]
                result[i, j] = numba_func(group, group_index, *args)
        return result

    return group_agg


@functools.cache
def generate_numba_transform_func(
    func: Callable[..., np.ndarray],
    nopython: bool,
    nogil: bool,
    parallel: bool,
) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:
    """
    Generate a numba jitted transform function specified by values from engine_kwargs.

    1. jit the user's function
    2. Return a groupby transform function with the jitted function inline

    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the groupby evaluation loop.

    Parameters
    ----------
    func : function
        function to be applied to each window and will be JITed
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    numba_func = jit_user_function(func)
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def group_transform(
        values: np.ndarray,
        index: np.ndarray,
        begin: np.ndarray,
        end: np.ndarray,
        num_columns: int,
        *args: Any,
    ) -> np.ndarray:
        assert len(begin) == len(end)
        num_groups = len(begin)

        result = np.empty((len(values), num_columns))
        for i in numba.prange(num_groups):
            group_index = index[begin[i] : end[i]]
            for j in numba.prange(num_columns):
                group = values[begin[i] : end[i], j]
                result[begin[i] : end[i], j] = numba_func(group, group_index, *args)
        return result

    return group_transform
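A minimal sketch (not part of the diff) of the user-facing entry point for these jitted kernels; it assumes the optional numba dependency is installed.

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 2.0, 3.0]})

# The UDF signature must start with (values, index), per validate_udf above.
def f(values, index):
    return values.sum()

# engine_kwargs map onto the nopython/nogil/parallel parameters of
# generate_numba_agg_func; functools.cache reuses the compiled kernel.
out = df.groupby("key")["val"].agg(
    f,
    engine="numba",
    engine_kwargs={"nopython": True, "nogil": False, "parallel": False},
)
print(out)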
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/ops.py
ADDED
@@ -0,0 +1,1208 @@
"""
Provide classes to perform the groupby aggregate operations.

These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
from __future__ import annotations

import collections
import functools
from typing import (
    TYPE_CHECKING,
    Callable,
    Generic,
    final,
)

import numpy as np

from pandas._libs import (
    NaT,
    lib,
)
import pandas._libs.groupby as libgroupby
from pandas._typing import (
    ArrayLike,
    AxisInt,
    NDFrameT,
    Shape,
    npt,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly

from pandas.core.dtypes.cast import (
    maybe_cast_pointwise_result,
    maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
    ensure_float64,
    ensure_int64,
    ensure_platform_int,
    ensure_uint64,
    is_1d_only_ea_dtype,
)
from pandas.core.dtypes.missing import (
    isna,
    maybe_fill,
)

from pandas.core.frame import DataFrame
from pandas.core.groupby import grouper
from pandas.core.indexes.api import (
    CategoricalIndex,
    Index,
    MultiIndex,
    ensure_index,
)
from pandas.core.series import Series
from pandas.core.sorting import (
    compress_group_index,
    decons_obs_group_ids,
    get_flattened_list,
    get_group_index,
    get_group_index_sorter,
    get_indexer_dict,
)

if TYPE_CHECKING:
    from collections.abc import (
        Hashable,
        Iterator,
        Sequence,
    )

    from pandas.core.generic import NDFrame


def check_result_array(obj, dtype) -> None:
    # Our operation is supposed to be an aggregation/reduction. If
    # it returns an ndarray, this likely means an invalid operation has
    # been passed. See test_apply_without_aggregation, test_agg_must_agg
    if isinstance(obj, np.ndarray):
        if dtype != object:
            # If it is object dtype, the function can be a reduction/aggregation
            # and still return an ndarray e.g. test_agg_over_numpy_arrays
            raise ValueError("Must produce aggregated value")


def extract_result(res):
    """
    Extract the result object, it might be a 0-dim ndarray
    or a len-1 0-dim, or a scalar
    """
    if hasattr(res, "_values"):
        # Preserve EA
        res = res._values
        if res.ndim == 1 and len(res) == 1:
            # see test_agg_lambda_with_timezone, test_resampler_grouper.py::test_apply
            res = res[0]
    return res


class WrappedCythonOp:
    """
    Dispatch logic for functions defined in _libs.groupby

    Parameters
    ----------
    kind: str
        Whether the operation is an aggregate or transform.
    how: str
        Operation name, e.g. "mean".
    has_dropped_na: bool
        True precisely when dropna=True and the grouper contains a null value.
    """

    # Functions for which we do _not_ attempt to cast the cython result
    # back to the original dtype.
    cast_blocklist = frozenset(
        ["any", "all", "rank", "count", "size", "idxmin", "idxmax"]
    )

    def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
        self.kind = kind
        self.how = how
        self.has_dropped_na = has_dropped_na

    _CYTHON_FUNCTIONS: dict[str, dict] = {
        "aggregate": {
            "any": functools.partial(libgroupby.group_any_all, val_test="any"),
            "all": functools.partial(libgroupby.group_any_all, val_test="all"),
            "sum": "group_sum",
            "prod": "group_prod",
            "idxmin": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmin"),
            "idxmax": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmax"),
            "min": "group_min",
            "max": "group_max",
            "mean": "group_mean",
            "median": "group_median_float64",
            "var": "group_var",
            "std": functools.partial(libgroupby.group_var, name="std"),
            "sem": functools.partial(libgroupby.group_var, name="sem"),
            "skew": "group_skew",
            "first": "group_nth",
            "last": "group_last",
            "ohlc": "group_ohlc",
        },
        "transform": {
            "cumprod": "group_cumprod",
            "cumsum": "group_cumsum",
            "cummin": "group_cummin",
            "cummax": "group_cummax",
            "rank": "group_rank",
        },
    }

    _cython_arity = {"ohlc": 4}  # OHLC

    @classmethod
    def get_kind_from_how(cls, how: str) -> str:
        if how in cls._CYTHON_FUNCTIONS["aggregate"]:
            return "aggregate"
        return "transform"

    # Note: we make this a classmethod and pass kind+how so that caching
    # works at the class level and not the instance level
    @classmethod
    @functools.cache
    def _get_cython_function(
        cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
    ):
        dtype_str = dtype.name
        ftype = cls._CYTHON_FUNCTIONS[kind][how]

        # see if there is a fused-type version of function
        # only valid for numeric
        if callable(ftype):
            f = ftype
        else:
            f = getattr(libgroupby, ftype)
        if is_numeric:
            return f
        elif dtype == np.dtype(object):
            if how in ["median", "cumprod"]:
                # no fused types -> no __signatures__
                raise NotImplementedError(
                    f"function is not implemented for this dtype: "
                    f"[how->{how},dtype->{dtype_str}]"
                )
            elif how in ["std", "sem", "idxmin", "idxmax"]:
                # We have a partial object that does not have __signatures__
                return f
            elif how == "skew":
                # _get_cython_vals will convert to float64
                pass
            elif "object" not in f.__signatures__:
                # raise NotImplementedError here rather than TypeError later
                raise NotImplementedError(
                    f"function is not implemented for this dtype: "
                    f"[how->{how},dtype->{dtype_str}]"
                )
            return f
        else:
            raise NotImplementedError(
                "This should not be reached. Please report a bug at "
                "github.com/pandas-dev/pandas/",
                dtype,
            )

    def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:
        """
        Cast numeric dtypes to float64 for functions that only support that.

        Parameters
        ----------
        values : np.ndarray

        Returns
        -------
        values : np.ndarray
        """
        how = self.how

        if how in ["median", "std", "sem", "skew"]:
            # median only has a float64 implementation
            # We should only get here with is_numeric, as non-numeric cases
            # should raise in _get_cython_function
            values = ensure_float64(values)

        elif values.dtype.kind in "iu":
            if how in ["var", "mean"] or (
                self.kind == "transform" and self.has_dropped_na
            ):
                # has_dropped_na check need for test_null_group_str_transformer
                # result may still include NaN, so we have to cast
                values = ensure_float64(values)

            elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]:
                # Avoid overflow during group op
                if values.dtype.kind == "i":
                    values = ensure_int64(values)
                else:
                    values = ensure_uint64(values)

        return values

    def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
        how = self.how
        kind = self.kind

        arity = self._cython_arity.get(how, 1)

        out_shape: Shape
        if how == "ohlc":
            out_shape = (ngroups, arity)
        elif arity > 1:
            raise NotImplementedError(
                "arity of more than 1 is not supported for the 'how' argument"
            )
        elif kind == "transform":
            out_shape = values.shape
        else:
            out_shape = (ngroups,) + values.shape[1:]
        return out_shape

    def _get_out_dtype(self, dtype: np.dtype) -> np.dtype:
        how = self.how

        if how == "rank":
            out_dtype = "float64"
        elif how in ["idxmin", "idxmax"]:
            # The Cython implementation only produces the row number; we'll take
            # from the index using this in post processing
            out_dtype = "intp"
        else:
            if dtype.kind in "iufcb":
                out_dtype = f"{dtype.kind}{dtype.itemsize}"
            else:
                out_dtype = "object"
        return np.dtype(out_dtype)

    def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
        """
        Get the desired dtype of a result based on the
        input dtype and how it was computed.

        Parameters
        ----------
        dtype : np.dtype

        Returns
        -------
        np.dtype
            The desired dtype of the result.
        """
        how = self.how

        if how in ["sum", "cumsum", "sum", "prod", "cumprod"]:
            if dtype == np.dtype(bool):
                return np.dtype(np.int64)
        elif how in ["mean", "median", "var", "std", "sem"]:
            if dtype.kind in "fc":
                return dtype
            elif dtype.kind in "iub":
                return np.dtype(np.float64)
        return dtype

    @final
    def _cython_op_ndim_compat(
        self,
        values: np.ndarray,
        *,
        min_count: int,
        ngroups: int,
        comp_ids: np.ndarray,
        mask: npt.NDArray[np.bool_] | None = None,
        result_mask: npt.NDArray[np.bool_] | None = None,
        **kwargs,
    ) -> np.ndarray:
        if values.ndim == 1:
            # expand to 2d, dispatch, then squeeze if appropriate
            values2d = values[None, :]
            if mask is not None:
                mask = mask[None, :]
            if result_mask is not None:
                result_mask = result_mask[None, :]
            res = self._call_cython_op(
                values2d,
                min_count=min_count,
                ngroups=ngroups,
                comp_ids=comp_ids,
                mask=mask,
                result_mask=result_mask,
                **kwargs,
            )
            if res.shape[0] == 1:
                return res[0]

            # otherwise we have OHLC
            return res.T

        return self._call_cython_op(
            values,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=comp_ids,
            mask=mask,
            result_mask=result_mask,
            **kwargs,
        )

    @final
    def _call_cython_op(
        self,
        values: np.ndarray,  # np.ndarray[ndim=2]
        *,
        min_count: int,
        ngroups: int,
        comp_ids: np.ndarray,
        mask: npt.NDArray[np.bool_] | None,
        result_mask: npt.NDArray[np.bool_] | None,
        **kwargs,
    ) -> np.ndarray:  # np.ndarray[ndim=2]
        orig_values = values

        dtype = values.dtype
        is_numeric = dtype.kind in "iufcb"

        is_datetimelike = dtype.kind in "mM"

        if is_datetimelike:
            values = values.view("int64")
            is_numeric = True
        elif dtype.kind == "b":
            values = values.view("uint8")
        if values.dtype == "float16":
            values = values.astype(np.float32)

        if self.how in ["any", "all"]:
            if mask is None:
                mask = isna(values)
            if dtype == object:
                if kwargs["skipna"]:
                    # GH#37501: don't raise on pd.NA when skipna=True
                    if mask.any():
                        # mask on original values computed separately
                        values = values.copy()
                        values[mask] = True
            values = values.astype(bool, copy=False).view(np.int8)
            is_numeric = True

        values = values.T
        if mask is not None:
            mask = mask.T
            if result_mask is not None:
                result_mask = result_mask.T

        out_shape = self._get_output_shape(ngroups, values)
        func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric)
        values = self._get_cython_vals(values)
        out_dtype = self._get_out_dtype(values.dtype)

        result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
        if self.kind == "aggregate":
            counts = np.zeros(ngroups, dtype=np.int64)
            if self.how in [
                "idxmin",
                "idxmax",
                "min",
                "max",
                "mean",
                "last",
                "first",
                "sum",
            ]:
                func(
                    out=result,
                    counts=counts,
                    values=values,
                    labels=comp_ids,
                    min_count=min_count,
                    mask=mask,
                    result_mask=result_mask,
                    is_datetimelike=is_datetimelike,
                    **kwargs,
                )
            elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]:
                if self.how in ["std", "sem"]:
                    kwargs["is_datetimelike"] = is_datetimelike
                func(
                    result,
                    counts,
                    values,
                    comp_ids,
                    min_count=min_count,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
            elif self.how in ["any", "all"]:
                func(
                    out=result,
                    values=values,
                    labels=comp_ids,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
                result = result.astype(bool, copy=False)
            elif self.how in ["skew"]:
                func(
                    out=result,
                    counts=counts,
                    values=values,
                    labels=comp_ids,
                    mask=mask,
                    result_mask=result_mask,
                    **kwargs,
                )
                if dtype == object:
                    result = result.astype(object)

            else:
                raise NotImplementedError(f"{self.how} is not implemented")
        else:
            # TODO: min_count
            if self.how != "rank":
                # TODO: should rank take result_mask?
                kwargs["result_mask"] = result_mask
            func(
                out=result,
                values=values,
                labels=comp_ids,
                ngroups=ngroups,
                is_datetimelike=is_datetimelike,
                mask=mask,
                **kwargs,
            )

        if self.kind == "aggregate" and self.how not in ["idxmin", "idxmax"]:
            # i.e. counts is defined. Locations where count<min_count
            # need to have the result set to np.nan, which may require casting,
            # see GH#40767. For idxmin/idxmax is handled specially via post-processing
            if result.dtype.kind in "iu" and not is_datetimelike:
                # if the op keeps the int dtypes, we have to use 0
                cutoff = max(0 if self.how in ["sum", "prod"] else 1, min_count)
                empty_groups = counts < cutoff
                if empty_groups.any():
                    if result_mask is not None:
                        assert result_mask[empty_groups].all()
                    else:
                        # Note: this conversion could be lossy, see GH#40767
                        result = result.astype("float64")
                        result[empty_groups] = np.nan

        result = result.T

        if self.how not in self.cast_blocklist:
            # e.g. if we are int64 and need to restore to datetime64/timedelta64
            # "rank" is the only member of cast_blocklist we get here
            # Casting only needed for float16, bool, datetimelike,
            # and self.how in ["sum", "prod", "ohlc", "cumprod"]
            res_dtype = self._get_result_dtype(orig_values.dtype)
            op_result = maybe_downcast_to_dtype(result, res_dtype)
        else:
            op_result = result

        return op_result

    @final
    def _validate_axis(self, axis: AxisInt, values: ArrayLike) -> None:
        if values.ndim > 2:
            raise NotImplementedError("number of dimensions is currently limited to 2")
        if values.ndim == 2:
            assert axis == 1, axis
        elif not is_1d_only_ea_dtype(values.dtype):
            # Note: it is *not* the case that axis is always 0 for 1-dim values,
            # as we can have 1D ExtensionArrays that we need to treat as 2D
            assert axis == 0

    @final
    def cython_operation(
        self,
        *,
        values: ArrayLike,
        axis: AxisInt,
        min_count: int = -1,
        comp_ids: np.ndarray,
        ngroups: int,
        **kwargs,
    ) -> ArrayLike:
        """
        Call our cython function, with appropriate pre- and post- processing.
        """
        self._validate_axis(axis, values)

        if not isinstance(values, np.ndarray):
            # i.e. ExtensionArray
            return values._groupby_op(
                how=self.how,
                has_dropped_na=self.has_dropped_na,
                min_count=min_count,
                ngroups=ngroups,
                ids=comp_ids,
                **kwargs,
            )

        return self._cython_op_ndim_compat(
            values,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=comp_ids,
            mask=None,
            **kwargs,
        )


class BaseGrouper:
    """
    This is an internal Grouper class, which actually holds
    the generated groups

    Parameters
    ----------
    axis : Index
    groupings : Sequence[Grouping]
        all the grouping instances to handle in this grouper
        for example for grouper list to groupby, need to pass the list
    sort : bool, default True
        whether this grouper will give sorted result or not

    """

    axis: Index

    def __init__(
        self,
        axis: Index,
        groupings: Sequence[grouper.Grouping],
        sort: bool = True,
        dropna: bool = True,
    ) -> None:
        assert isinstance(axis, Index), axis

        self.axis = axis
        self._groupings: list[grouper.Grouping] = list(groupings)
        self._sort = sort
        self.dropna = dropna

    @property
    def groupings(self) -> list[grouper.Grouping]:
        return self._groupings

    @property
    def shape(self) -> Shape:
        return tuple(ping.ngroups for ping in self.groupings)

    def __iter__(self) -> Iterator[Hashable]:
        return iter(self.indices)

    @property
    def nkeys(self) -> int:
        return len(self.groupings)

    def get_iterator(
        self, data: NDFrameT, axis: AxisInt = 0
    ) -> Iterator[tuple[Hashable, NDFrameT]]:
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        splitter = self._get_splitter(data, axis=axis)
        keys = self.group_keys_seq
        yield from zip(keys, splitter)

    @final
    def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:
        """
        Returns
        -------
        Generator yielding subsetted objects
        """
        ids, _, ngroups = self.group_info
        return _get_splitter(
            data,
            ids,
            ngroups,
            sorted_ids=self._sorted_ids,
            sort_idx=self._sort_idx,
            axis=axis,
        )

    @final
    @cache_readonly
    def group_keys_seq(self):
        if len(self.groupings) == 1:
            return self.levels[0]
        else:
            ids, _, ngroups = self.group_info

            # provide "flattened" iterator for multi-group setting
            return get_flattened_list(ids, ngroups, self.levels, self.codes)

    @cache_readonly
    def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
        """dict {group name -> group indices}"""
        if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
            # This shows unused categories in indices GH#38642
            return self.groupings[0].indices
        codes_list = [ping.codes for ping in self.groupings]
        keys = [ping._group_index for ping in self.groupings]
        return get_indexer_dict(codes_list, keys)

    @final
    def result_ilocs(self) -> npt.NDArray[np.intp]:
        """
        Get the original integer locations of result_index in the input.
        """
        # Original indices are where group_index would go via sorting.
        # But when dropna is true, we need to remove null values while accounting for
        # any gaps that then occur because of them.
        group_index = get_group_index(
            self.codes, self.shape, sort=self._sort, xnull=True
        )
        group_index, _ = compress_group_index(group_index, sort=self._sort)

        if self.has_dropped_na:
            mask = np.where(group_index >= 0)
            # Count how many gaps are caused by previous null values for each position
            null_gaps = np.cumsum(group_index == -1)[mask]
            group_index = group_index[mask]

        result = get_group_index_sorter(group_index, self.ngroups)

        if self.has_dropped_na:
            # Shift by the number of prior null gaps
            result += np.take(null_gaps, result)

        return result

    @final
    @property
    def codes(self) -> list[npt.NDArray[np.signedinteger]]:
        return [ping.codes for ping in self.groupings]

    @property
    def levels(self) -> list[Index]:
        return [ping._group_index for ping in self.groupings]

    @property
    def names(self) -> list[Hashable]:
        return [ping.name for ping in self.groupings]

    @final
    def size(self) -> Series:
        """
        Compute group sizes.
        """
        ids, _, ngroups = self.group_info
        out: np.ndarray | list
        if ngroups:
            out = np.bincount(ids[ids != -1], minlength=ngroups)
        else:
            out = []
        return Series(out, index=self.result_index, dtype="int64", copy=False)

    @cache_readonly
    def groups(self) -> dict[Hashable, np.ndarray]:
        """dict {group name -> group labels}"""
        if len(self.groupings) == 1:
            return self.groupings[0].groups
        else:
            to_groupby = []
            for ping in self.groupings:
                gv = ping.grouping_vector
                if not isinstance(gv, BaseGrouper):
                    to_groupby.append(gv)
                else:
                    to_groupby.append(gv.groupings[0].grouping_vector)
            index = MultiIndex.from_arrays(to_groupby)
            return self.axis.groupby(index)

    @final
    @cache_readonly
    def is_monotonic(self) -> bool:
        # return if my group orderings are monotonic
        return Index(self.group_info[0]).is_monotonic_increasing

    @final
    @cache_readonly
    def has_dropped_na(self) -> bool:
        """
        Whether grouper has null value(s) that are dropped.
        """
        return bool((self.group_info[0] < 0).any())

    @cache_readonly
    def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
        comp_ids, obs_group_ids = self._get_compressed_codes()

        ngroups = len(obs_group_ids)
        comp_ids = ensure_platform_int(comp_ids)

        return comp_ids, obs_group_ids, ngroups

    @cache_readonly
    def codes_info(self) -> npt.NDArray[np.intp]:
        # return the codes of items in original grouped axis
        ids, _, _ = self.group_info
        return ids

    @final
    def _get_compressed_codes(
        self,
    ) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]:
        # The first returned ndarray may have any signed integer dtype
        if len(self.groupings) > 1:
            group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)
            return compress_group_index(group_index, sort=self._sort)
            # FIXME: compress_group_index's second return value is int64, not intp

        ping = self.groupings[0]
        return ping.codes, np.arange(len(ping._group_index), dtype=np.intp)

    @final
    @cache_readonly
    def ngroups(self) -> int:
        return len(self.result_index)

    @property
    def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:
        codes = self.codes
        ids, obs_ids, _ = self.group_info
        return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)

    @cache_readonly
    def result_index(self) -> Index:
        if len(self.groupings) == 1:
|
785 |
+
return self.groupings[0]._result_index.rename(self.names[0])
|
786 |
+
|
787 |
+
codes = self.reconstructed_codes
|
788 |
+
levels = [ping._result_index for ping in self.groupings]
|
789 |
+
return MultiIndex(
|
790 |
+
levels=levels, codes=codes, verify_integrity=False, names=self.names
|
791 |
+
)
|
792 |
+
|
793 |
+
@final
|
794 |
+
def get_group_levels(self) -> list[ArrayLike]:
|
795 |
+
# Note: only called from _insert_inaxis_grouper, which
|
796 |
+
# is only called for BaseGrouper, never for BinGrouper
|
797 |
+
if len(self.groupings) == 1:
|
798 |
+
return [self.groupings[0]._group_arraylike]
|
799 |
+
|
800 |
+
name_list = []
|
801 |
+
for ping, codes in zip(self.groupings, self.reconstructed_codes):
|
802 |
+
codes = ensure_platform_int(codes)
|
803 |
+
levels = ping._group_arraylike.take(codes)
|
804 |
+
|
805 |
+
name_list.append(levels)
|
806 |
+
|
807 |
+
return name_list
|
808 |
+
|
809 |
+
# ------------------------------------------------------------
|
810 |
+
# Aggregation functions
|
811 |
+
|
812 |
+
@final
|
813 |
+
def _cython_operation(
|
814 |
+
self,
|
815 |
+
kind: str,
|
816 |
+
values,
|
817 |
+
how: str,
|
818 |
+
axis: AxisInt,
|
819 |
+
min_count: int = -1,
|
820 |
+
**kwargs,
|
821 |
+
) -> ArrayLike:
|
822 |
+
"""
|
823 |
+
Returns the values of a cython operation.
|
824 |
+
"""
|
825 |
+
assert kind in ["transform", "aggregate"]
|
826 |
+
|
827 |
+
cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na)
|
828 |
+
|
829 |
+
ids, _, _ = self.group_info
|
830 |
+
ngroups = self.ngroups
|
831 |
+
return cy_op.cython_operation(
|
832 |
+
values=values,
|
833 |
+
axis=axis,
|
834 |
+
min_count=min_count,
|
835 |
+
comp_ids=ids,
|
836 |
+
ngroups=ngroups,
|
837 |
+
**kwargs,
|
838 |
+
)
|
839 |
+
|
840 |
+
@final
|
841 |
+
def agg_series(
|
842 |
+
self, obj: Series, func: Callable, preserve_dtype: bool = False
|
843 |
+
) -> ArrayLike:
|
844 |
+
"""
|
845 |
+
Parameters
|
846 |
+
----------
|
847 |
+
obj : Series
|
848 |
+
func : function taking a Series and returning a scalar-like
|
849 |
+
preserve_dtype : bool
|
850 |
+
Whether the aggregation is known to be dtype-preserving.
|
851 |
+
|
852 |
+
Returns
|
853 |
+
-------
|
854 |
+
np.ndarray or ExtensionArray
|
855 |
+
"""
|
856 |
+
|
857 |
+
if not isinstance(obj._values, np.ndarray):
|
858 |
+
# we can preserve a little bit more aggressively with EA dtype
|
859 |
+
# because maybe_cast_pointwise_result will do a try/except
|
860 |
+
# with _from_sequence. NB we are assuming here that _from_sequence
|
861 |
+
# is sufficiently strict that it casts appropriately.
|
862 |
+
preserve_dtype = True
|
863 |
+
|
864 |
+
result = self._aggregate_series_pure_python(obj, func)
|
865 |
+
|
866 |
+
npvalues = lib.maybe_convert_objects(result, try_float=False)
|
867 |
+
if preserve_dtype:
|
868 |
+
out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
|
869 |
+
else:
|
870 |
+
out = npvalues
|
871 |
+
return out
|
872 |
+
|
873 |
+
@final
|
874 |
+
def _aggregate_series_pure_python(
|
875 |
+
self, obj: Series, func: Callable
|
876 |
+
) -> npt.NDArray[np.object_]:
|
877 |
+
_, _, ngroups = self.group_info
|
878 |
+
|
879 |
+
result = np.empty(ngroups, dtype="O")
|
880 |
+
initialized = False
|
881 |
+
|
882 |
+
splitter = self._get_splitter(obj, axis=0)
|
883 |
+
|
884 |
+
for i, group in enumerate(splitter):
|
885 |
+
res = func(group)
|
886 |
+
res = extract_result(res)
|
887 |
+
|
888 |
+
if not initialized:
|
889 |
+
# We only do this validation on the first iteration
|
890 |
+
check_result_array(res, group.dtype)
|
891 |
+
initialized = True
|
892 |
+
|
893 |
+
result[i] = res
|
894 |
+
|
895 |
+
return result
|
896 |
+
|
897 |
+
@final
|
898 |
+
def apply_groupwise(
|
899 |
+
self, f: Callable, data: DataFrame | Series, axis: AxisInt = 0
|
900 |
+
) -> tuple[list, bool]:
|
901 |
+
mutated = False
|
902 |
+
splitter = self._get_splitter(data, axis=axis)
|
903 |
+
group_keys = self.group_keys_seq
|
904 |
+
result_values = []
|
905 |
+
|
906 |
+
# This calls DataSplitter.__iter__
|
907 |
+
zipped = zip(group_keys, splitter)
|
908 |
+
|
909 |
+
for key, group in zipped:
|
910 |
+
# Pinning name is needed for
|
911 |
+
# test_group_apply_once_per_group,
|
912 |
+
# test_inconsistent_return_type, test_set_group_name,
|
913 |
+
# test_group_name_available_in_inference_pass,
|
914 |
+
# test_groupby_multi_timezone
|
915 |
+
object.__setattr__(group, "name", key)
|
916 |
+
|
917 |
+
# group might be modified
|
918 |
+
group_axes = group.axes
|
919 |
+
res = f(group)
|
920 |
+
if not mutated and not _is_indexed_like(res, group_axes, axis):
|
921 |
+
mutated = True
|
922 |
+
result_values.append(res)
|
923 |
+
# getattr pattern for __name__ is needed for functools.partial objects
|
924 |
+
if len(group_keys) == 0 and getattr(f, "__name__", None) in [
|
925 |
+
"skew",
|
926 |
+
"sum",
|
927 |
+
"prod",
|
928 |
+
]:
|
929 |
+
# If group_keys is empty, then no function calls have been made,
|
930 |
+
# so we will not have raised even if this is an invalid dtype.
|
931 |
+
# So do one dummy call here to raise appropriate TypeError.
|
932 |
+
f(data.iloc[:0])
|
933 |
+
|
934 |
+
return result_values, mutated
|
935 |
+
|
936 |
+
# ------------------------------------------------------------
|
937 |
+
# Methods for sorting subsets of our GroupBy's object
|
938 |
+
|
939 |
+
@final
|
940 |
+
@cache_readonly
|
941 |
+
def _sort_idx(self) -> npt.NDArray[np.intp]:
|
942 |
+
# Counting sort indexer
|
943 |
+
ids, _, ngroups = self.group_info
|
944 |
+
return get_group_index_sorter(ids, ngroups)
|
945 |
+
|
946 |
+
@final
|
947 |
+
@cache_readonly
|
948 |
+
def _sorted_ids(self) -> npt.NDArray[np.intp]:
|
949 |
+
ids, _, _ = self.group_info
|
950 |
+
return ids.take(self._sort_idx)
|
951 |
+
|
952 |
+
|
953 |
+
class BinGrouper(BaseGrouper):
|
954 |
+
"""
|
955 |
+
This is an internal Grouper class
|
956 |
+
|
957 |
+
Parameters
|
958 |
+
----------
|
959 |
+
bins : the split index of binlabels to group the item of axis
|
960 |
+
binlabels : the label list
|
961 |
+
indexer : np.ndarray[np.intp], optional
|
962 |
+
the indexer created by Grouper
|
963 |
+
some groupers (TimeGrouper) will sort its axis and its
|
964 |
+
group_info is also sorted, so need the indexer to reorder
|
965 |
+
|
966 |
+
Examples
|
967 |
+
--------
|
968 |
+
bins: [2, 4, 6, 8, 10]
|
969 |
+
binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
|
970 |
+
'2005-01-05', '2005-01-07', '2005-01-09'],
|
971 |
+
dtype='datetime64[ns]', freq='2D')
|
972 |
+
|
973 |
+
the group_info, which contains the label of each item in grouped
|
974 |
+
axis, the index of label in label list, group number, is
|
975 |
+
|
976 |
+
(array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)
|
977 |
+
|
978 |
+
means that, the grouped axis has 10 items, can be grouped into 5
|
979 |
+
labels, the first and second items belong to the first label, the
|
980 |
+
third and forth items belong to the second label, and so on
|
981 |
+
|
982 |
+
"""
|
983 |
+
|
984 |
+
bins: npt.NDArray[np.int64]
|
985 |
+
binlabels: Index
|
986 |
+
|
987 |
+
def __init__(
|
988 |
+
self,
|
989 |
+
bins,
|
990 |
+
binlabels,
|
991 |
+
indexer=None,
|
992 |
+
) -> None:
|
993 |
+
self.bins = ensure_int64(bins)
|
994 |
+
self.binlabels = ensure_index(binlabels)
|
995 |
+
self.indexer = indexer
|
996 |
+
|
997 |
+
# These lengths must match, otherwise we could call agg_series
|
998 |
+
# with empty self.bins, which would raise later.
|
999 |
+
assert len(self.binlabels) == len(self.bins)
|
1000 |
+
|
1001 |
+
@cache_readonly
|
1002 |
+
def groups(self):
|
1003 |
+
"""dict {group name -> group labels}"""
|
1004 |
+
# this is mainly for compat
|
1005 |
+
# GH 3881
|
1006 |
+
result = {
|
1007 |
+
key: value
|
1008 |
+
for key, value in zip(self.binlabels, self.bins)
|
1009 |
+
if key is not NaT
|
1010 |
+
}
|
1011 |
+
return result
|
1012 |
+
|
1013 |
+
@property
|
1014 |
+
def nkeys(self) -> int:
|
1015 |
+
# still matches len(self.groupings), but we can hard-code
|
1016 |
+
return 1
|
1017 |
+
|
1018 |
+
@cache_readonly
|
1019 |
+
def codes_info(self) -> npt.NDArray[np.intp]:
|
1020 |
+
# return the codes of items in original grouped axis
|
1021 |
+
ids, _, _ = self.group_info
|
1022 |
+
if self.indexer is not None:
|
1023 |
+
sorter = np.lexsort((ids, self.indexer))
|
1024 |
+
ids = ids[sorter]
|
1025 |
+
return ids
|
1026 |
+
|
1027 |
+
def get_iterator(self, data: NDFrame, axis: AxisInt = 0):
|
1028 |
+
"""
|
1029 |
+
Groupby iterator
|
1030 |
+
|
1031 |
+
Returns
|
1032 |
+
-------
|
1033 |
+
Generator yielding sequence of (name, subsetted object)
|
1034 |
+
for each group
|
1035 |
+
"""
|
1036 |
+
if axis == 0:
|
1037 |
+
slicer = lambda start, edge: data.iloc[start:edge]
|
1038 |
+
else:
|
1039 |
+
slicer = lambda start, edge: data.iloc[:, start:edge]
|
1040 |
+
|
1041 |
+
length = len(data.axes[axis])
|
1042 |
+
|
1043 |
+
start = 0
|
1044 |
+
for edge, label in zip(self.bins, self.binlabels):
|
1045 |
+
if label is not NaT:
|
1046 |
+
yield label, slicer(start, edge)
|
1047 |
+
start = edge
|
1048 |
+
|
1049 |
+
if start < length:
|
1050 |
+
yield self.binlabels[-1], slicer(start, None)
|
1051 |
+
|
1052 |
+
@cache_readonly
|
1053 |
+
def indices(self):
|
1054 |
+
indices = collections.defaultdict(list)
|
1055 |
+
|
1056 |
+
i = 0
|
1057 |
+
for label, bin in zip(self.binlabels, self.bins):
|
1058 |
+
if i < bin:
|
1059 |
+
if label is not NaT:
|
1060 |
+
indices[label] = list(range(i, bin))
|
1061 |
+
i = bin
|
1062 |
+
return indices
|
1063 |
+
|
1064 |
+
@cache_readonly
|
1065 |
+
def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
|
1066 |
+
ngroups = self.ngroups
|
1067 |
+
obs_group_ids = np.arange(ngroups, dtype=np.intp)
|
1068 |
+
rep = np.diff(np.r_[0, self.bins])
|
1069 |
+
|
1070 |
+
rep = ensure_platform_int(rep)
|
1071 |
+
if ngroups == len(self.bins):
|
1072 |
+
comp_ids = np.repeat(np.arange(ngroups), rep)
|
1073 |
+
else:
|
1074 |
+
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
|
1075 |
+
|
1076 |
+
return (
|
1077 |
+
ensure_platform_int(comp_ids),
|
1078 |
+
obs_group_ids,
|
1079 |
+
ngroups,
|
1080 |
+
)
|
1081 |
+
|
1082 |
+
@cache_readonly
|
1083 |
+
def reconstructed_codes(self) -> list[np.ndarray]:
|
1084 |
+
# get unique result indices, and prepend 0 as groupby starts from the first
|
1085 |
+
return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
|
1086 |
+
|
1087 |
+
@cache_readonly
|
1088 |
+
def result_index(self) -> Index:
|
1089 |
+
if len(self.binlabels) != 0 and isna(self.binlabels[0]):
|
1090 |
+
return self.binlabels[1:]
|
1091 |
+
|
1092 |
+
return self.binlabels
|
1093 |
+
|
1094 |
+
@property
|
1095 |
+
def levels(self) -> list[Index]:
|
1096 |
+
return [self.binlabels]
|
1097 |
+
|
1098 |
+
@property
|
1099 |
+
def names(self) -> list[Hashable]:
|
1100 |
+
return [self.binlabels.name]
|
1101 |
+
|
1102 |
+
@property
|
1103 |
+
def groupings(self) -> list[grouper.Grouping]:
|
1104 |
+
lev = self.binlabels
|
1105 |
+
codes = self.group_info[0]
|
1106 |
+
labels = lev.take(codes)
|
1107 |
+
ping = grouper.Grouping(
|
1108 |
+
labels, labels, in_axis=False, level=None, uniques=lev._values
|
1109 |
+
)
|
1110 |
+
return [ping]
|
1111 |
+
|
1112 |
+
|
1113 |
+
def _is_indexed_like(obj, axes, axis: AxisInt) -> bool:
|
1114 |
+
if isinstance(obj, Series):
|
1115 |
+
if len(axes) > 1:
|
1116 |
+
return False
|
1117 |
+
return obj.axes[axis].equals(axes[axis])
|
1118 |
+
elif isinstance(obj, DataFrame):
|
1119 |
+
return obj.axes[axis].equals(axes[axis])
|
1120 |
+
|
1121 |
+
return False
|
1122 |
+
|
1123 |
+
|
1124 |
+
# ----------------------------------------------------------------------
|
1125 |
+
# Splitting / application
|
1126 |
+
|
1127 |
+
|
1128 |
+
class DataSplitter(Generic[NDFrameT]):
|
1129 |
+
def __init__(
|
1130 |
+
self,
|
1131 |
+
data: NDFrameT,
|
1132 |
+
labels: npt.NDArray[np.intp],
|
1133 |
+
ngroups: int,
|
1134 |
+
*,
|
1135 |
+
sort_idx: npt.NDArray[np.intp],
|
1136 |
+
sorted_ids: npt.NDArray[np.intp],
|
1137 |
+
axis: AxisInt = 0,
|
1138 |
+
) -> None:
|
1139 |
+
self.data = data
|
1140 |
+
self.labels = ensure_platform_int(labels) # _should_ already be np.intp
|
1141 |
+
self.ngroups = ngroups
|
1142 |
+
|
1143 |
+
self._slabels = sorted_ids
|
1144 |
+
self._sort_idx = sort_idx
|
1145 |
+
|
1146 |
+
self.axis = axis
|
1147 |
+
assert isinstance(axis, int), axis
|
1148 |
+
|
1149 |
+
def __iter__(self) -> Iterator:
|
1150 |
+
sdata = self._sorted_data
|
1151 |
+
|
1152 |
+
if self.ngroups == 0:
|
1153 |
+
# we are inside a generator, rather than raise StopIteration
|
1154 |
+
# we merely return signal the end
|
1155 |
+
return
|
1156 |
+
|
1157 |
+
starts, ends = lib.generate_slices(self._slabels, self.ngroups)
|
1158 |
+
|
1159 |
+
for start, end in zip(starts, ends):
|
1160 |
+
yield self._chop(sdata, slice(start, end))
|
1161 |
+
|
1162 |
+
@cache_readonly
|
1163 |
+
def _sorted_data(self) -> NDFrameT:
|
1164 |
+
return self.data.take(self._sort_idx, axis=self.axis)
|
1165 |
+
|
1166 |
+
def _chop(self, sdata, slice_obj: slice) -> NDFrame:
|
1167 |
+
raise AbstractMethodError(self)
|
1168 |
+
|
1169 |
+
|
1170 |
+
class SeriesSplitter(DataSplitter):
|
1171 |
+
def _chop(self, sdata: Series, slice_obj: slice) -> Series:
|
1172 |
+
# fastpath equivalent to `sdata.iloc[slice_obj]`
|
1173 |
+
mgr = sdata._mgr.get_slice(slice_obj)
|
1174 |
+
ser = sdata._constructor_from_mgr(mgr, axes=mgr.axes)
|
1175 |
+
ser._name = sdata.name
|
1176 |
+
return ser.__finalize__(sdata, method="groupby")
|
1177 |
+
|
1178 |
+
|
1179 |
+
class FrameSplitter(DataSplitter):
|
1180 |
+
def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
|
1181 |
+
# Fastpath equivalent to:
|
1182 |
+
# if self.axis == 0:
|
1183 |
+
# return sdata.iloc[slice_obj]
|
1184 |
+
# else:
|
1185 |
+
# return sdata.iloc[:, slice_obj]
|
1186 |
+
mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
|
1187 |
+
df = sdata._constructor_from_mgr(mgr, axes=mgr.axes)
|
1188 |
+
return df.__finalize__(sdata, method="groupby")
|
1189 |
+
|
1190 |
+
|
1191 |
+
def _get_splitter(
|
1192 |
+
data: NDFrame,
|
1193 |
+
labels: npt.NDArray[np.intp],
|
1194 |
+
ngroups: int,
|
1195 |
+
*,
|
1196 |
+
sort_idx: npt.NDArray[np.intp],
|
1197 |
+
sorted_ids: npt.NDArray[np.intp],
|
1198 |
+
axis: AxisInt = 0,
|
1199 |
+
) -> DataSplitter:
|
1200 |
+
if isinstance(data, Series):
|
1201 |
+
klass: type[DataSplitter] = SeriesSplitter
|
1202 |
+
else:
|
1203 |
+
# i.e. DataFrame
|
1204 |
+
klass = FrameSplitter
|
1205 |
+
|
1206 |
+
return klass(
|
1207 |
+
data, labels, ngroups, sort_idx=sort_idx, sorted_ids=sorted_ids, axis=axis
|
1208 |
+
)
|
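
Aside (not part of the upstream diff): the `BinGrouper.group_info` construction above is terse, so here is a minimal NumPy-only sketch, reusing the example values from the BinGrouper docstring, of how bin edges become per-row group codes:

import numpy as np

# Bin edges from the BinGrouper docstring example: 10 rows, 5 bins.
bins = np.array([2, 4, 6, 8, 10], dtype=np.int64)
ngroups = len(bins)

# Group sizes are the gaps between consecutive edges (first gap measured from 0).
rep = np.diff(np.r_[0, bins])                  # array([2, 2, 2, 2, 2])

# Repeating each group id by its size yields one code per original row.
comp_ids = np.repeat(np.arange(ngroups), rep)  # array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
obs_group_ids = np.arange(ngroups, dtype=np.intp)

# The same triple the docstring shows for group_info:
print((comp_ids, obs_group_ids, ngroups))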
llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__init__.py
ADDED
@@ -0,0 +1,28 @@
"""
Implementation of pandas.Series.str and its interface.

* strings.accessor.StringMethods : Accessor for Series.str
* strings.base.BaseStringArrayMethods: Mixin ABC for EAs to implement str methods

Most methods on the StringMethods accessor follow the pattern:

    1. extract the array from the series (or index)
    2. Call that array's implementation of the string method
    3. Wrap the result (in a Series, index, or DataFrame)

Pandas extension arrays implementing string methods should inherit from
pandas.core.strings.base.BaseStringArrayMethods. This is an ABC defining
the various string methods. To avoid namespace clashes and pollution,
these are prefixed with `_str_`. So ``Series.str.upper()`` calls
``Series.array._str_upper()``. The interface isn't currently public
to other string extension arrays.
"""
# Pandas' current implementation is in ObjectStringArrayMixin. This is designed
# to work on object-dtype ndarrays.
#
# BaseStringArrayMethods
#  - ObjectStringArrayMixin
#    - StringArray
#    - NumpyExtensionArray
#    - Categorical
#    - ArrowStringArray
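
As the module docstring above describes, `Series.str.<method>` is a thin dispatch to the array's `_str_<method>`. A small sketch of that round trip (the `_str_` interface is internal; it is called by hand here only to illustrate the dispatch):

import pandas as pd

s = pd.Series(["apple", "Banana", None])

# Public accessor: extract the array -> call _str_upper -> wrap the result.
via_accessor = s.str.upper()

# The same dispatch done manually.
raw = s.array._str_upper()
by_hand = pd.Series(raw, index=s.index)

assert via_accessor.equals(by_hand)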
llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.02 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__pycache__/accessor.cpython-310.pyc
ADDED
Binary file (99.7 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__pycache__/base.cpython-310.pyc
ADDED
Binary file (9.75 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/strings/__pycache__/object_array.cpython-310.pyc
ADDED
Binary file (21.3 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/strings/accessor.py
ADDED
The diff for this file is too large to render. See raw diff.
llmeval-env/lib/python3.10/site-packages/pandas/core/strings/base.py
ADDED
@@ -0,0 +1,262 @@
from __future__ import annotations

import abc
from typing import (
    TYPE_CHECKING,
    Callable,
    Literal,
)

import numpy as np

if TYPE_CHECKING:
    from collections.abc import Sequence
    import re

    from pandas._typing import Scalar

    from pandas import Series


class BaseStringArrayMethods(abc.ABC):
    """
    Base class for extension arrays implementing string methods.

    This is where our ExtensionArrays can override the implementation of
    Series.str.<method>. We don't expect this to work with
    3rd-party extension arrays.

    * User calls Series.str.<method>
    * pandas extracts the extension array from the Series
    * pandas calls ``extension_array._str_<method>(*args, **kwargs)``
    * pandas wraps the result, to return to the user.

    See :ref:`Series.str` for the docstring of each method.
    """

    def _str_getitem(self, key):
        if isinstance(key, slice):
            return self._str_slice(start=key.start, stop=key.stop, step=key.step)
        else:
            return self._str_get(key)

    @abc.abstractmethod
    def _str_count(self, pat, flags: int = 0):
        pass

    @abc.abstractmethod
    def _str_pad(
        self,
        width: int,
        side: Literal["left", "right", "both"] = "left",
        fillchar: str = " ",
    ):
        pass

    @abc.abstractmethod
    def _str_contains(
        self, pat, case: bool = True, flags: int = 0, na=None, regex: bool = True
    ):
        pass

    @abc.abstractmethod
    def _str_startswith(self, pat, na=None):
        pass

    @abc.abstractmethod
    def _str_endswith(self, pat, na=None):
        pass

    @abc.abstractmethod
    def _str_replace(
        self,
        pat: str | re.Pattern,
        repl: str | Callable,
        n: int = -1,
        case: bool = True,
        flags: int = 0,
        regex: bool = True,
    ):
        pass

    @abc.abstractmethod
    def _str_repeat(self, repeats: int | Sequence[int]):
        pass

    @abc.abstractmethod
    def _str_match(
        self, pat: str, case: bool = True, flags: int = 0, na: Scalar = np.nan
    ):
        pass

    @abc.abstractmethod
    def _str_fullmatch(
        self,
        pat: str | re.Pattern,
        case: bool = True,
        flags: int = 0,
        na: Scalar = np.nan,
    ):
        pass

    @abc.abstractmethod
    def _str_encode(self, encoding, errors: str = "strict"):
        pass

    @abc.abstractmethod
    def _str_find(self, sub, start: int = 0, end=None):
        pass

    @abc.abstractmethod
    def _str_rfind(self, sub, start: int = 0, end=None):
        pass

    @abc.abstractmethod
    def _str_findall(self, pat, flags: int = 0):
        pass

    @abc.abstractmethod
    def _str_get(self, i):
        pass

    @abc.abstractmethod
    def _str_index(self, sub, start: int = 0, end=None):
        pass

    @abc.abstractmethod
    def _str_rindex(self, sub, start: int = 0, end=None):
        pass

    @abc.abstractmethod
    def _str_join(self, sep: str):
        pass

    @abc.abstractmethod
    def _str_partition(self, sep: str, expand):
        pass

    @abc.abstractmethod
    def _str_rpartition(self, sep: str, expand):
        pass

    @abc.abstractmethod
    def _str_len(self):
        pass

    @abc.abstractmethod
    def _str_slice(self, start=None, stop=None, step=None):
        pass

    @abc.abstractmethod
    def _str_slice_replace(self, start=None, stop=None, repl=None):
        pass

    @abc.abstractmethod
    def _str_translate(self, table):
        pass

    @abc.abstractmethod
    def _str_wrap(self, width: int, **kwargs):
        pass

    @abc.abstractmethod
    def _str_get_dummies(self, sep: str = "|"):
        pass

    @abc.abstractmethod
    def _str_isalnum(self):
        pass

    @abc.abstractmethod
    def _str_isalpha(self):
        pass

    @abc.abstractmethod
    def _str_isdecimal(self):
        pass

    @abc.abstractmethod
    def _str_isdigit(self):
        pass

    @abc.abstractmethod
    def _str_islower(self):
        pass

    @abc.abstractmethod
    def _str_isnumeric(self):
        pass

    @abc.abstractmethod
    def _str_isspace(self):
        pass

    @abc.abstractmethod
    def _str_istitle(self):
        pass

    @abc.abstractmethod
    def _str_isupper(self):
        pass

    @abc.abstractmethod
    def _str_capitalize(self):
        pass

    @abc.abstractmethod
    def _str_casefold(self):
        pass

    @abc.abstractmethod
    def _str_title(self):
        pass

    @abc.abstractmethod
    def _str_swapcase(self):
        pass

    @abc.abstractmethod
    def _str_lower(self):
        pass

    @abc.abstractmethod
    def _str_upper(self):
        pass

    @abc.abstractmethod
    def _str_normalize(self, form):
        pass

    @abc.abstractmethod
    def _str_strip(self, to_strip=None):
        pass

    @abc.abstractmethod
    def _str_lstrip(self, to_strip=None):
        pass

    @abc.abstractmethod
    def _str_rstrip(self, to_strip=None):
        pass

    @abc.abstractmethod
    def _str_removeprefix(self, prefix: str) -> Series:
        pass

    @abc.abstractmethod
    def _str_removesuffix(self, suffix: str) -> Series:
        pass

    @abc.abstractmethod
    def _str_split(
        self, pat=None, n=-1, expand: bool = False, regex: bool | None = None
    ):
        pass

    @abc.abstractmethod
    def _str_rsplit(self, pat=None, n=-1):
        pass

    @abc.abstractmethod
    def _str_extract(self, pat: str, flags: int = 0, expand: bool = True):
        pass
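
One concrete, non-abstract piece of behavior defined above is `_str_getitem`: a slice key routes to `_str_slice`, any other key to `_str_get`. Through the public accessor that looks like this (the internal method names appear only in comments):

import pandas as pd

s = pd.Series(["pandas", "numpy"])

assert list(s.str[0]) == ["p", "n"]        # integer key -> _str_get(0)
assert list(s.str[1:4]) == ["and", "ump"]  # slice key   -> _str_slice(1, 4, None)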
llmeval-env/lib/python3.10/site-packages/pandas/core/strings/object_array.py
ADDED
@@ -0,0 +1,497 @@
from __future__ import annotations

import functools
import re
import textwrap
from typing import (
    TYPE_CHECKING,
    Callable,
    Literal,
    cast,
)
import unicodedata

import numpy as np

from pandas._libs import lib
import pandas._libs.missing as libmissing
import pandas._libs.ops as libops

from pandas.core.dtypes.missing import isna

from pandas.core.strings.base import BaseStringArrayMethods

if TYPE_CHECKING:
    from collections.abc import Sequence

    from pandas._typing import (
        NpDtype,
        Scalar,
    )

    from pandas import Series


class ObjectStringArrayMixin(BaseStringArrayMethods):
    """
    String Methods operating on object-dtype ndarrays.
    """

    _str_na_value = np.nan

    def __len__(self) -> int:
        # For typing, _str_map relies on the object being sized.
        raise NotImplementedError

    def _str_map(
        self, f, na_value=None, dtype: NpDtype | None = None, convert: bool = True
    ):
        """
        Map a callable over valid elements of the array.

        Parameters
        ----------
        f : Callable
            A function to call on each non-NA element.
        na_value : Scalar, optional
            The value to set for NA values. Might also be used for the
            fill value if the callable `f` raises an exception.
            This defaults to ``self._str_na_value`` which is ``np.nan``
            for object-dtype and Categorical and ``pd.NA`` for StringArray.
        dtype : Dtype, optional
            The dtype of the result array.
        convert : bool, default True
            Whether to call `maybe_convert_objects` on the resulting ndarray
        """
        if dtype is None:
            dtype = np.dtype("object")
        if na_value is None:
            na_value = self._str_na_value

        if not len(self):
            return np.array([], dtype=dtype)

        arr = np.asarray(self, dtype=object)
        mask = isna(arr)
        map_convert = convert and not np.all(mask)
        try:
            result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert)
        except (TypeError, AttributeError) as err:
            # Reraise the exception if callable `f` got wrong number of args.
            # The user may want to be warned by this, instead of getting NaN
            p_err = (
                r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
                r"(?(3)required )positional arguments?"
            )

            if len(err.args) >= 1 and re.search(p_err, err.args[0]):
                # FIXME: this should be totally avoidable
                raise err

            def g(x):
                # This type of fallback behavior can be removed once
                # we remove object-dtype .str accessor.
                try:
                    return f(x)
                except (TypeError, AttributeError):
                    return na_value

            return self._str_map(g, na_value=na_value, dtype=dtype)
        if not isinstance(result, np.ndarray):
            return result
        if na_value is not np.nan:
            np.putmask(result, mask, na_value)
            if convert and result.dtype == object:
                result = lib.maybe_convert_objects(result)
        return result

    def _str_count(self, pat, flags: int = 0):
        regex = re.compile(pat, flags=flags)
        f = lambda x: len(regex.findall(x))
        return self._str_map(f, dtype="int64")

    def _str_pad(
        self,
        width: int,
        side: Literal["left", "right", "both"] = "left",
        fillchar: str = " ",
    ):
        if side == "left":
            f = lambda x: x.rjust(width, fillchar)
        elif side == "right":
            f = lambda x: x.ljust(width, fillchar)
        elif side == "both":
            f = lambda x: x.center(width, fillchar)
        else:  # pragma: no cover
            raise ValueError("Invalid side")
        return self._str_map(f)

    def _str_contains(
        self, pat, case: bool = True, flags: int = 0, na=np.nan, regex: bool = True
    ):
        if regex:
            if not case:
                flags |= re.IGNORECASE

            pat = re.compile(pat, flags=flags)

            f = lambda x: pat.search(x) is not None
        else:
            if case:
                f = lambda x: pat in x
            else:
                upper_pat = pat.upper()
                f = lambda x: upper_pat in x.upper()
        return self._str_map(f, na, dtype=np.dtype("bool"))

    def _str_startswith(self, pat, na=None):
        f = lambda x: x.startswith(pat)
        return self._str_map(f, na_value=na, dtype=np.dtype(bool))

    def _str_endswith(self, pat, na=None):
        f = lambda x: x.endswith(pat)
        return self._str_map(f, na_value=na, dtype=np.dtype(bool))

    def _str_replace(
        self,
        pat: str | re.Pattern,
        repl: str | Callable,
        n: int = -1,
        case: bool = True,
        flags: int = 0,
        regex: bool = True,
    ):
        if case is False:
            # add case flag, if provided
            flags |= re.IGNORECASE

        if regex or flags or callable(repl):
            if not isinstance(pat, re.Pattern):
                if regex is False:
                    pat = re.escape(pat)
                pat = re.compile(pat, flags=flags)

            n = n if n >= 0 else 0
            f = lambda x: pat.sub(repl=repl, string=x, count=n)
        else:
            f = lambda x: x.replace(pat, repl, n)

        return self._str_map(f, dtype=str)

    def _str_repeat(self, repeats: int | Sequence[int]):
        if lib.is_integer(repeats):
            rint = cast(int, repeats)

            def scalar_rep(x):
                try:
                    return bytes.__mul__(x, rint)
                except TypeError:
                    return str.__mul__(x, rint)

            return self._str_map(scalar_rep, dtype=str)
        else:
            from pandas.core.arrays.string_ import BaseStringArray

            def rep(x, r):
                if x is libmissing.NA:
                    return x
                try:
                    return bytes.__mul__(x, r)
                except TypeError:
                    return str.__mul__(x, r)

            result = libops.vec_binop(
                np.asarray(self),
                np.asarray(repeats, dtype=object),
                rep,
            )
            if isinstance(self, BaseStringArray):
                # Not going through map, so we have to do this here.
                result = type(self)._from_sequence(result, dtype=self.dtype)
            return result

    def _str_match(
        self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
    ):
        if not case:
            flags |= re.IGNORECASE

        regex = re.compile(pat, flags=flags)

        f = lambda x: regex.match(x) is not None
        return self._str_map(f, na_value=na, dtype=np.dtype(bool))

    def _str_fullmatch(
        self,
        pat: str | re.Pattern,
        case: bool = True,
        flags: int = 0,
        na: Scalar | None = None,
    ):
        if not case:
            flags |= re.IGNORECASE

        regex = re.compile(pat, flags=flags)

        f = lambda x: regex.fullmatch(x) is not None
        return self._str_map(f, na_value=na, dtype=np.dtype(bool))

    def _str_encode(self, encoding, errors: str = "strict"):
        f = lambda x: x.encode(encoding, errors=errors)
        return self._str_map(f, dtype=object)

    def _str_find(self, sub, start: int = 0, end=None):
        return self._str_find_(sub, start, end, side="left")

    def _str_rfind(self, sub, start: int = 0, end=None):
        return self._str_find_(sub, start, end, side="right")

    def _str_find_(self, sub, start, end, side):
        if side == "left":
            method = "find"
        elif side == "right":
            method = "rfind"
        else:  # pragma: no cover
            raise ValueError("Invalid side")

        if end is None:
            f = lambda x: getattr(x, method)(sub, start)
        else:
            f = lambda x: getattr(x, method)(sub, start, end)
        return self._str_map(f, dtype="int64")

    def _str_findall(self, pat, flags: int = 0):
        regex = re.compile(pat, flags=flags)
        return self._str_map(regex.findall, dtype="object")

    def _str_get(self, i):
        def f(x):
            if isinstance(x, dict):
                return x.get(i)
            elif len(x) > i >= -len(x):
                return x[i]
            return self._str_na_value

        return self._str_map(f)

    def _str_index(self, sub, start: int = 0, end=None):
        if end:
            f = lambda x: x.index(sub, start, end)
        else:
            f = lambda x: x.index(sub, start)
        return self._str_map(f, dtype="int64")

    def _str_rindex(self, sub, start: int = 0, end=None):
        if end:
            f = lambda x: x.rindex(sub, start, end)
        else:
            f = lambda x: x.rindex(sub, start)
        return self._str_map(f, dtype="int64")

    def _str_join(self, sep: str):
        return self._str_map(sep.join)

    def _str_partition(self, sep: str, expand):
        result = self._str_map(lambda x: x.partition(sep), dtype="object")
        return result

    def _str_rpartition(self, sep: str, expand):
        return self._str_map(lambda x: x.rpartition(sep), dtype="object")

    def _str_len(self):
        return self._str_map(len, dtype="int64")

    def _str_slice(self, start=None, stop=None, step=None):
        obj = slice(start, stop, step)
        return self._str_map(lambda x: x[obj])

    def _str_slice_replace(self, start=None, stop=None, repl=None):
        if repl is None:
            repl = ""

        def f(x):
            if x[start:stop] == "":
                local_stop = start
            else:
                local_stop = stop
            y = ""
            if start is not None:
                y += x[:start]
            y += repl
            if stop is not None:
                y += x[local_stop:]
            return y

        return self._str_map(f)

    def _str_split(
        self,
        pat: str | re.Pattern | None = None,
        n=-1,
        expand: bool = False,
        regex: bool | None = None,
    ):
        if pat is None:
            if n is None or n == 0:
                n = -1
            f = lambda x: x.split(pat, n)
        else:
            new_pat: str | re.Pattern
            if regex is True or isinstance(pat, re.Pattern):
                new_pat = re.compile(pat)
            elif regex is False:
                new_pat = pat
            # regex is None so link to old behavior #43563
            else:
                if len(pat) == 1:
                    new_pat = pat
                else:
                    new_pat = re.compile(pat)

            if isinstance(new_pat, re.Pattern):
                if n is None or n == -1:
                    n = 0
                f = lambda x: new_pat.split(x, maxsplit=n)
            else:
                if n is None or n == 0:
                    n = -1
                f = lambda x: x.split(pat, n)
        return self._str_map(f, dtype=object)

    def _str_rsplit(self, pat=None, n=-1):
        if n is None or n == 0:
            n = -1
        f = lambda x: x.rsplit(pat, n)
        return self._str_map(f, dtype="object")

    def _str_translate(self, table):
        return self._str_map(lambda x: x.translate(table))

    def _str_wrap(self, width: int, **kwargs):
        kwargs["width"] = width
        tw = textwrap.TextWrapper(**kwargs)
        return self._str_map(lambda s: "\n".join(tw.wrap(s)))

    def _str_get_dummies(self, sep: str = "|"):
        from pandas import Series

        arr = Series(self).fillna("")
        try:
            arr = sep + arr + sep
        except (TypeError, NotImplementedError):
            arr = sep + arr.astype(str) + sep

        tags: set[str] = set()
        for ts in Series(arr, copy=False).str.split(sep):
            tags.update(ts)
        tags2 = sorted(tags - {""})

        dummies = np.empty((len(arr), len(tags2)), dtype=np.int64)

        def _isin(test_elements: str, element: str) -> bool:
            return element in test_elements

        for i, t in enumerate(tags2):
            pat = sep + t + sep
            dummies[:, i] = lib.map_infer(
                arr.to_numpy(), functools.partial(_isin, element=pat)
            )
        return dummies, tags2

    def _str_upper(self):
        return self._str_map(lambda x: x.upper())

    def _str_isalnum(self):
        return self._str_map(str.isalnum, dtype="bool")

    def _str_isalpha(self):
        return self._str_map(str.isalpha, dtype="bool")

    def _str_isdecimal(self):
        return self._str_map(str.isdecimal, dtype="bool")

    def _str_isdigit(self):
        return self._str_map(str.isdigit, dtype="bool")

    def _str_islower(self):
        return self._str_map(str.islower, dtype="bool")

    def _str_isnumeric(self):
        return self._str_map(str.isnumeric, dtype="bool")

    def _str_isspace(self):
        return self._str_map(str.isspace, dtype="bool")

    def _str_istitle(self):
        return self._str_map(str.istitle, dtype="bool")

    def _str_isupper(self):
        return self._str_map(str.isupper, dtype="bool")

    def _str_capitalize(self):
        return self._str_map(str.capitalize)

    def _str_casefold(self):
        return self._str_map(str.casefold)

    def _str_title(self):
        return self._str_map(str.title)

    def _str_swapcase(self):
        return self._str_map(str.swapcase)

    def _str_lower(self):
        return self._str_map(str.lower)

    def _str_normalize(self, form):
        f = lambda x: unicodedata.normalize(form, x)
        return self._str_map(f)

    def _str_strip(self, to_strip=None):
        return self._str_map(lambda x: x.strip(to_strip))

    def _str_lstrip(self, to_strip=None):
        return self._str_map(lambda x: x.lstrip(to_strip))

    def _str_rstrip(self, to_strip=None):
        return self._str_map(lambda x: x.rstrip(to_strip))

    def _str_removeprefix(self, prefix: str) -> Series:
        # outstanding question on whether to use native methods for users on Python 3.9+
        # https://github.com/pandas-dev/pandas/pull/39226#issuecomment-836719770,
        # in which case we could do return self._str_map(str.removeprefix)

        def removeprefix(text: str) -> str:
            if text.startswith(prefix):
                return text[len(prefix) :]
            return text

        return self._str_map(removeprefix)

    def _str_removesuffix(self, suffix: str) -> Series:
        return self._str_map(lambda x: x.removesuffix(suffix))

    def _str_extract(self, pat: str, flags: int = 0, expand: bool = True):
        regex = re.compile(pat, flags=flags)
        na_value = self._str_na_value

        if not expand:

            def g(x):
                m = regex.search(x)
                return m.groups()[0] if m else na_value

            return self._str_map(g, convert=False)

        empty_row = [na_value] * regex.groups

        def f(x):
            if not isinstance(x, str):
                return empty_row
            m = regex.search(x)
            if m:
                return [na_value if item is None else item for item in m.groups()]
            else:
                return empty_row

        return [f(val) for val in np.asarray(self)]
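
A note on the NA handling that `_str_map` implements above: the `isna` mask keeps the mapped callable from ever seeing missing values, and `na_value` (np.nan here) is written back afterwards, which can upcast an integer result. A short demonstration through the public API (behavior as observed on recent pandas; the nullable `string` dtype swaps in `pd.NA` and keeps a nullable result dtype):

import pandas as pd

s = pd.Series(["a1", None, "b22"])

# Object dtype: np.nan is the fill value, so the int64 counts upcast to float64.
print(s.str.count(r"\d").tolist())                 # [1.0, nan, 2.0]

# StringDtype: _str_na_value is pd.NA and the result stays nullable Int64.
print(s.astype("string").str.count(r"\d").dtype)   # Int64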
llmeval-env/lib/python3.10/site-packages/pandas/core/util/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/core/util/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (189 Bytes).