applied-ai-018 committed (verified)
Commit da99466 · 1 parent: e069948

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. venv/lib/python3.10/site-packages/pandas/core/_numba/__init__.py +0 -0
  2. venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py +27 -0
  6. venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py +196 -0
  7. venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py +125 -0
  8. venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py +29 -0
  9. venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py +244 -0
  10. venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py +245 -0
  11. venv/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py +0 -0
  12. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/pandas/core/dtypes/api.py +85 -0
  24. venv/lib/python3.10/site-packages/pandas/core/dtypes/astype.py +301 -0
  25. venv/lib/python3.10/site-packages/pandas/core/dtypes/base.py +583 -0
  26. venv/lib/python3.10/site-packages/pandas/core/dtypes/common.py +1748 -0
  27. venv/lib/python3.10/site-packages/pandas/core/dtypes/concat.py +348 -0
  28. venv/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py +2348 -0
  29. venv/lib/python3.10/site-packages/pandas/core/dtypes/generic.py +147 -0
  30. venv/lib/python3.10/site-packages/pandas/core/dtypes/inference.py +437 -0
  31. venv/lib/python3.10/site-packages/pandas/core/dtypes/missing.py +810 -0
  32. venv/lib/python3.10/site-packages/pandas/core/groupby/__init__.py +15 -0
  33. venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/pandas/core/groupby/base.py +121 -0
  42. venv/lib/python3.10/site-packages/pandas/core/groupby/categorical.py +87 -0
  43. venv/lib/python3.10/site-packages/pandas/core/groupby/generic.py +2852 -0
  44. venv/lib/python3.10/site-packages/pandas/core/groupby/groupby.py +0 -0
  45. venv/lib/python3.10/site-packages/pandas/core/groupby/grouper.py +1102 -0
  46. venv/lib/python3.10/site-packages/pandas/core/groupby/indexing.py +304 -0
  47. venv/lib/python3.10/site-packages/pandas/core/groupby/numba_.py +181 -0
  48. venv/lib/python3.10/site-packages/pandas/core/groupby/ops.py +1208 -0
  49. venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc +0 -0
venv/lib/python3.10/site-packages/pandas/core/_numba/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes).
venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc ADDED
Binary file (5.1 kB).
venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc ADDED
Binary file (15.9 kB).
venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py ADDED
@@ -0,0 +1,27 @@
from pandas.core._numba.kernels.mean_ import (
    grouped_mean,
    sliding_mean,
)
from pandas.core._numba.kernels.min_max_ import (
    grouped_min_max,
    sliding_min_max,
)
from pandas.core._numba.kernels.sum_ import (
    grouped_sum,
    sliding_sum,
)
from pandas.core._numba.kernels.var_ import (
    grouped_var,
    sliding_var,
)

__all__ = [
    "sliding_mean",
    "grouped_mean",
    "sliding_sum",
    "grouped_sum",
    "sliding_var",
    "grouped_var",
    "sliding_min_max",
    "grouped_min_max",
]
venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py ADDED
@@ -0,0 +1,196 @@
"""
Numba 1D mean kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numba
import numpy as np

from pandas.core._numba.kernels.shared import is_monotonic_increasing
from pandas.core._numba.kernels.sum_ import grouped_kahan_sum

if TYPE_CHECKING:
    from pandas._typing import npt


@numba.jit(nopython=True, nogil=True, parallel=False)
def add_mean(
    val: float,
    nobs: int,
    sum_x: float,
    neg_ct: int,
    compensation: float,
    num_consecutive_same_value: int,
    prev_value: float,
) -> tuple[int, float, int, float, int, float]:
    if not np.isnan(val):
        nobs += 1
        y = val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
        if val < 0:
            neg_ct += 1

        if val == prev_value:
            num_consecutive_same_value += 1
        else:
            num_consecutive_same_value = 1
        prev_value = val

    return nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value


@numba.jit(nopython=True, nogil=True, parallel=False)
def remove_mean(
    val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float
) -> tuple[int, float, int, float]:
    if not np.isnan(val):
        nobs -= 1
        y = -val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
        if val < 0:
            neg_ct -= 1
    return nobs, sum_x, neg_ct, compensation


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_mean(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    N = len(start)
    nobs = 0
    sum_x = 0.0
    neg_ct = 0
    compensation_add = 0.0
    compensation_remove = 0.0

    is_monotonic_increasing_bounds = is_monotonic_increasing(
        start
    ) and is_monotonic_increasing(end)

    output = np.empty(N, dtype=result_dtype)

    for i in range(N):
        s = start[i]
        e = end[i]
        if i == 0 or not is_monotonic_increasing_bounds:
            prev_value = values[s]
            num_consecutive_same_value = 0

            for j in range(s, e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_mean(
                    val,
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                )
        else:
            for j in range(start[i - 1], s):
                val = values[j]
                nobs, sum_x, neg_ct, compensation_remove = remove_mean(
                    val, nobs, sum_x, neg_ct, compensation_remove
                )

            for j in range(end[i - 1], e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_mean(
                    val,
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                )

        if nobs >= min_periods and nobs > 0:
            result = sum_x / nobs
            if num_consecutive_same_value >= nobs:
                result = prev_value
            elif neg_ct == 0 and result < 0:
                result = 0
            elif neg_ct == nobs and result > 0:
                result = 0
        else:
            result = np.nan

        output[i] = result

        if not is_monotonic_increasing_bounds:
            nobs = 0
            sum_x = 0.0
            neg_ct = 0
            compensation_remove = 0.0

    # na_position is empty list since float64 can already hold nans
    # Do list comprehension, since numba cannot figure out that na_pos is
    # empty list of ints on its own
    na_pos = [0 for i in range(0)]
    return output, na_pos


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_mean(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(
        values, result_dtype, labels, ngroups
    )

    # Post-processing, replace sums that don't satisfy min_periods
    for lab in range(ngroups):
        nobs = nobs_arr[lab]
        num_consecutive_same_value = consecutive_counts[lab]
        prev_value = prev_vals[lab]
        sum_x = output[lab]
        if nobs >= min_periods:
            if num_consecutive_same_value >= nobs:
                result = prev_value * nobs
            else:
                result = sum_x
        else:
            result = np.nan
        result /= nobs
        output[lab] = result

    # na_position is empty list since float64 can already hold nans
    # Do list comprehension, since numba cannot figure out that na_pos is
    # empty list of ints on its own
    na_pos = [0 for i in range(0)]
    return output, na_pos
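For orientation: the kernels in mean_.py are private pandas internals. The usual way to exercise them is through the public rolling API with the numba engine. A minimal sketch, assuming a pandas build with numba installed (the Series values and window size are illustrative only, not part of this commit):

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, np.nan, 4.0, 5.0])
# engine="numba" routes the rolling mean through the jitted sliding kernels
# instead of the default Cython aggregation path.
print(s.rolling(window=3, min_periods=1).mean(engine="numba"))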
venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py ADDED
@@ -0,0 +1,125 @@
"""
Numba 1D min/max kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numba
import numpy as np

if TYPE_CHECKING:
    from pandas._typing import npt


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_min_max(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
    is_max: bool,
) -> tuple[np.ndarray, list[int]]:
    N = len(start)
    nobs = 0
    output = np.empty(N, dtype=result_dtype)
    na_pos = []
    # Use deque once numba supports it
    # https://github.com/numba/numba/issues/7417
    Q: list = []
    W: list = []
    for i in range(N):
        curr_win_size = end[i] - start[i]
        if i == 0:
            st = start[i]
        else:
            st = end[i - 1]

        for k in range(st, end[i]):
            ai = values[k]
            if not np.isnan(ai):
                nobs += 1
            elif is_max:
                ai = -np.inf
            else:
                ai = np.inf
            # Discard previous entries if we find new min or max
            if is_max:
                while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            else:
                while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            Q.append(k)
            W.append(k)

        # Discard entries outside and left of current window
        while Q and Q[0] <= start[i] - 1:
            Q.pop(0)
        while W and W[0] <= start[i] - 1:
            if not np.isnan(values[W[0]]):
                nobs -= 1
            W.pop(0)

        # Save output based on index in input value array
        if Q and curr_win_size > 0 and nobs >= min_periods:
            output[i] = values[Q[0]]
        else:
            if values.dtype.kind != "i":
                output[i] = np.nan
            else:
                na_pos.append(i)

    return output, na_pos


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_min_max(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
    is_max: bool,
) -> tuple[np.ndarray, list[int]]:
    N = len(labels)
    nobs = np.zeros(ngroups, dtype=np.int64)
    na_pos = []
    output = np.empty(ngroups, dtype=result_dtype)

    for i in range(N):
        lab = labels[i]
        val = values[i]
        if lab < 0:
            continue

        if values.dtype.kind == "i" or not np.isnan(val):
            nobs[lab] += 1
        else:
            # NaN value cannot be a min/max value
            continue

        if nobs[lab] == 1:
            # First element in group, set output equal to this
            output[lab] = val
            continue

        if is_max:
            if val > output[lab]:
                output[lab] = val
        else:
            if val < output[lab]:
                output[lab] = val

    # Set labels that don't satisfy min_periods as np.nan
    for lab, count in enumerate(nobs):
        if count < min_periods:
            na_pos.append(lab)

    return output, na_pos
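The sliding_min_max kernel above maintains a monotonic queue of candidate indices in a plain list, since numba does not yet support collections.deque. A standalone sketch of the same idea in ordinary Python, using deque; sliding_max and its inputs are illustrative names, not pandas API:

from collections import deque

def sliding_max(values, window):
    """Sliding-window maximum via a monotonic queue of candidate indices."""
    q = deque()  # indices kept in decreasing value order; values[q[0]] is the current max
    out = []
    for i, v in enumerate(values):
        while q and values[q[-1]] <= v:  # drop entries dominated by the new value
            q.pop()
        q.append(i)
        if q[0] <= i - window:  # drop the front once it falls out of the window
            q.popleft()
        if i >= window - 1:
            out.append(values[q[0]])
    return out

print(sliding_max([3, 1, 4, 1, 5, 9, 2, 6], 3))  # [4, 4, 5, 9, 9, 9]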
venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py ADDED
@@ -0,0 +1,29 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import numba

if TYPE_CHECKING:
    import numpy as np


@numba.jit(
    # error: Any? not callable
    numba.boolean(numba.int64[:]),  # type: ignore[misc]
    nopython=True,
    nogil=True,
    parallel=False,
)
def is_monotonic_increasing(bounds: np.ndarray) -> bool:
    """Check if int64 values are monotonically increasing."""
    n = len(bounds)
    if n < 2:
        return True
    prev = bounds[0]
    for i in range(1, n):
        cur = bounds[i]
        if cur < prev:
            return False
        prev = cur
    return True
venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py ADDED
@@ -0,0 +1,244 @@
1
+ """
2
+ Numba 1D sum kernels that can be shared by
3
+ * Dataframe / Series
4
+ * groupby
5
+ * rolling / expanding
6
+
7
+ Mirrors pandas/_libs/window/aggregation.pyx
8
+ """
9
+ from __future__ import annotations
10
+
11
+ from typing import (
12
+ TYPE_CHECKING,
13
+ Any,
14
+ )
15
+
16
+ import numba
17
+ from numba.extending import register_jitable
18
+ import numpy as np
19
+
20
+ if TYPE_CHECKING:
21
+ from pandas._typing import npt
22
+
23
+ from pandas.core._numba.kernels.shared import is_monotonic_increasing
24
+
25
+
26
+ @numba.jit(nopython=True, nogil=True, parallel=False)
27
+ def add_sum(
28
+ val: Any,
29
+ nobs: int,
30
+ sum_x: Any,
31
+ compensation: Any,
32
+ num_consecutive_same_value: int,
33
+ prev_value: Any,
34
+ ) -> tuple[int, Any, Any, int, Any]:
35
+ if not np.isnan(val):
36
+ nobs += 1
37
+ y = val - compensation
38
+ t = sum_x + y
39
+ compensation = t - sum_x - y
40
+ sum_x = t
41
+
42
+ if val == prev_value:
43
+ num_consecutive_same_value += 1
44
+ else:
45
+ num_consecutive_same_value = 1
46
+ prev_value = val
47
+
48
+ return nobs, sum_x, compensation, num_consecutive_same_value, prev_value
49
+
50
+
51
+ @numba.jit(nopython=True, nogil=True, parallel=False)
52
+ def remove_sum(
53
+ val: Any, nobs: int, sum_x: Any, compensation: Any
54
+ ) -> tuple[int, Any, Any]:
55
+ if not np.isnan(val):
56
+ nobs -= 1
57
+ y = -val - compensation
58
+ t = sum_x + y
59
+ compensation = t - sum_x - y
60
+ sum_x = t
61
+ return nobs, sum_x, compensation
62
+
63
+
64
+ @numba.jit(nopython=True, nogil=True, parallel=False)
65
+ def sliding_sum(
66
+ values: np.ndarray,
67
+ result_dtype: np.dtype,
68
+ start: np.ndarray,
69
+ end: np.ndarray,
70
+ min_periods: int,
71
+ ) -> tuple[np.ndarray, list[int]]:
72
+ dtype = values.dtype
73
+
74
+ na_val: object = np.nan
75
+ if dtype.kind == "i":
76
+ na_val = 0
77
+
78
+ N = len(start)
79
+ nobs = 0
80
+ sum_x = 0
81
+ compensation_add = 0
82
+ compensation_remove = 0
83
+ na_pos = []
84
+
85
+ is_monotonic_increasing_bounds = is_monotonic_increasing(
86
+ start
87
+ ) and is_monotonic_increasing(end)
88
+
89
+ output = np.empty(N, dtype=result_dtype)
90
+
91
+ for i in range(N):
92
+ s = start[i]
93
+ e = end[i]
94
+ if i == 0 or not is_monotonic_increasing_bounds:
95
+ prev_value = values[s]
96
+ num_consecutive_same_value = 0
97
+
98
+ for j in range(s, e):
99
+ val = values[j]
100
+ (
101
+ nobs,
102
+ sum_x,
103
+ compensation_add,
104
+ num_consecutive_same_value,
105
+ prev_value,
106
+ ) = add_sum(
107
+ val,
108
+ nobs,
109
+ sum_x,
110
+ compensation_add,
111
+ num_consecutive_same_value,
112
+ prev_value,
113
+ )
114
+ else:
115
+ for j in range(start[i - 1], s):
116
+ val = values[j]
117
+ nobs, sum_x, compensation_remove = remove_sum(
118
+ val, nobs, sum_x, compensation_remove
119
+ )
120
+
121
+ for j in range(end[i - 1], e):
122
+ val = values[j]
123
+ (
124
+ nobs,
125
+ sum_x,
126
+ compensation_add,
127
+ num_consecutive_same_value,
128
+ prev_value,
129
+ ) = add_sum(
130
+ val,
131
+ nobs,
132
+ sum_x,
133
+ compensation_add,
134
+ num_consecutive_same_value,
135
+ prev_value,
136
+ )
137
+
138
+ if nobs == 0 == min_periods:
139
+ result: object = 0
140
+ elif nobs >= min_periods:
141
+ if num_consecutive_same_value >= nobs:
142
+ result = prev_value * nobs
143
+ else:
144
+ result = sum_x
145
+ else:
146
+ result = na_val
147
+ if dtype.kind == "i":
148
+ na_pos.append(i)
149
+
150
+ output[i] = result
151
+
152
+ if not is_monotonic_increasing_bounds:
153
+ nobs = 0
154
+ sum_x = 0
155
+ compensation_remove = 0
156
+
157
+ return output, na_pos
158
+
159
+
160
+ # Mypy/pyright don't like the fact that the decorator is untyped
161
+ @register_jitable # type: ignore[misc]
162
+ def grouped_kahan_sum(
163
+ values: np.ndarray,
164
+ result_dtype: np.dtype,
165
+ labels: npt.NDArray[np.intp],
166
+ ngroups: int,
167
+ ) -> tuple[
168
+ np.ndarray, npt.NDArray[np.int64], np.ndarray, npt.NDArray[np.int64], np.ndarray
169
+ ]:
170
+ N = len(labels)
171
+
172
+ nobs_arr = np.zeros(ngroups, dtype=np.int64)
173
+ comp_arr = np.zeros(ngroups, dtype=values.dtype)
174
+ consecutive_counts = np.zeros(ngroups, dtype=np.int64)
175
+ prev_vals = np.zeros(ngroups, dtype=values.dtype)
176
+ output = np.zeros(ngroups, dtype=result_dtype)
177
+
178
+ for i in range(N):
179
+ lab = labels[i]
180
+ val = values[i]
181
+
182
+ if lab < 0:
183
+ continue
184
+
185
+ sum_x = output[lab]
186
+ nobs = nobs_arr[lab]
187
+ compensation_add = comp_arr[lab]
188
+ num_consecutive_same_value = consecutive_counts[lab]
189
+ prev_value = prev_vals[lab]
190
+
191
+ (
192
+ nobs,
193
+ sum_x,
194
+ compensation_add,
195
+ num_consecutive_same_value,
196
+ prev_value,
197
+ ) = add_sum(
198
+ val,
199
+ nobs,
200
+ sum_x,
201
+ compensation_add,
202
+ num_consecutive_same_value,
203
+ prev_value,
204
+ )
205
+
206
+ output[lab] = sum_x
207
+ consecutive_counts[lab] = num_consecutive_same_value
208
+ prev_vals[lab] = prev_value
209
+ comp_arr[lab] = compensation_add
210
+ nobs_arr[lab] = nobs
211
+ return output, nobs_arr, comp_arr, consecutive_counts, prev_vals
212
+
213
+
214
+ @numba.jit(nopython=True, nogil=True, parallel=False)
215
+ def grouped_sum(
216
+ values: np.ndarray,
217
+ result_dtype: np.dtype,
218
+ labels: npt.NDArray[np.intp],
219
+ ngroups: int,
220
+ min_periods: int,
221
+ ) -> tuple[np.ndarray, list[int]]:
222
+ na_pos = []
223
+
224
+ output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(
225
+ values, result_dtype, labels, ngroups
226
+ )
227
+
228
+ # Post-processing, replace sums that don't satisfy min_periods
229
+ for lab in range(ngroups):
230
+ nobs = nobs_arr[lab]
231
+ num_consecutive_same_value = consecutive_counts[lab]
232
+ prev_value = prev_vals[lab]
233
+ sum_x = output[lab]
234
+ if nobs >= min_periods:
235
+ if num_consecutive_same_value >= nobs:
236
+ result = prev_value * nobs
237
+ else:
238
+ result = sum_x
239
+ else:
240
+ result = sum_x # Don't change val, will be replaced by nan later
241
+ na_pos.append(lab)
242
+ output[lab] = result
243
+
244
+ return output, na_pos
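The compensation variables threaded through add_sum/remove_sum implement Kahan (compensated) summation, which tracks the low-order bits lost when adding floating-point values of different magnitude. A minimal standalone sketch of the technique; kahan_sum is an illustrative name, not pandas API:

def kahan_sum(values):
    total = 0.0
    compensation = 0.0  # running error term, mirrors `compensation` in add_sum
    for val in values:
        y = val - compensation
        t = total + y
        compensation = (t - total) - y  # low-order bits lost when forming t
        total = t
    return total

vals = [0.1] * 10
# The naive sum accumulates rounding error; the compensated sum is
# much closer to the exact value 1.0.
print(sum(vals), kahan_sum(vals))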
venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py ADDED
@@ -0,0 +1,245 @@
1
+ """
2
+ Numba 1D var kernels that can be shared by
3
+ * Dataframe / Series
4
+ * groupby
5
+ * rolling / expanding
6
+
7
+ Mirrors pandas/_libs/window/aggregation.pyx
8
+ """
9
+ from __future__ import annotations
10
+
11
+ from typing import TYPE_CHECKING
12
+
13
+ import numba
14
+ import numpy as np
15
+
16
+ if TYPE_CHECKING:
17
+ from pandas._typing import npt
18
+
19
+ from pandas.core._numba.kernels.shared import is_monotonic_increasing
20
+
21
+
22
+ @numba.jit(nopython=True, nogil=True, parallel=False)
23
+ def add_var(
24
+ val: float,
25
+ nobs: int,
26
+ mean_x: float,
27
+ ssqdm_x: float,
28
+ compensation: float,
29
+ num_consecutive_same_value: int,
30
+ prev_value: float,
31
+ ) -> tuple[int, float, float, float, int, float]:
32
+ if not np.isnan(val):
33
+ if val == prev_value:
34
+ num_consecutive_same_value += 1
35
+ else:
36
+ num_consecutive_same_value = 1
37
+ prev_value = val
38
+
39
+ nobs += 1
40
+ prev_mean = mean_x - compensation
41
+ y = val - compensation
42
+ t = y - mean_x
43
+ compensation = t + mean_x - y
44
+ delta = t
45
+ if nobs:
46
+ mean_x += delta / nobs
47
+ else:
48
+ mean_x = 0
49
+ ssqdm_x += (val - prev_mean) * (val - mean_x)
50
+ return nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value
51
+
52
+
53
+ @numba.jit(nopython=True, nogil=True, parallel=False)
54
+ def remove_var(
55
+ val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float
56
+ ) -> tuple[int, float, float, float]:
57
+ if not np.isnan(val):
58
+ nobs -= 1
59
+ if nobs:
60
+ prev_mean = mean_x - compensation
61
+ y = val - compensation
62
+ t = y - mean_x
63
+ compensation = t + mean_x - y
64
+ delta = t
65
+ mean_x -= delta / nobs
66
+ ssqdm_x -= (val - prev_mean) * (val - mean_x)
67
+ else:
68
+ mean_x = 0
69
+ ssqdm_x = 0
70
+ return nobs, mean_x, ssqdm_x, compensation
71
+
72
+
73
+ @numba.jit(nopython=True, nogil=True, parallel=False)
74
+ def sliding_var(
75
+ values: np.ndarray,
76
+ result_dtype: np.dtype,
77
+ start: np.ndarray,
78
+ end: np.ndarray,
79
+ min_periods: int,
80
+ ddof: int = 1,
81
+ ) -> tuple[np.ndarray, list[int]]:
82
+ N = len(start)
83
+ nobs = 0
84
+ mean_x = 0.0
85
+ ssqdm_x = 0.0
86
+ compensation_add = 0.0
87
+ compensation_remove = 0.0
88
+
89
+ min_periods = max(min_periods, 1)
90
+ is_monotonic_increasing_bounds = is_monotonic_increasing(
91
+ start
92
+ ) and is_monotonic_increasing(end)
93
+
94
+ output = np.empty(N, dtype=result_dtype)
95
+
96
+ for i in range(N):
97
+ s = start[i]
98
+ e = end[i]
99
+ if i == 0 or not is_monotonic_increasing_bounds:
100
+ prev_value = values[s]
101
+ num_consecutive_same_value = 0
102
+
103
+ for j in range(s, e):
104
+ val = values[j]
105
+ (
106
+ nobs,
107
+ mean_x,
108
+ ssqdm_x,
109
+ compensation_add,
110
+ num_consecutive_same_value,
111
+ prev_value,
112
+ ) = add_var(
113
+ val,
114
+ nobs,
115
+ mean_x,
116
+ ssqdm_x,
117
+ compensation_add,
118
+ num_consecutive_same_value,
119
+ prev_value,
120
+ )
121
+ else:
122
+ for j in range(start[i - 1], s):
123
+ val = values[j]
124
+ nobs, mean_x, ssqdm_x, compensation_remove = remove_var(
125
+ val, nobs, mean_x, ssqdm_x, compensation_remove
126
+ )
127
+
128
+ for j in range(end[i - 1], e):
129
+ val = values[j]
130
+ (
131
+ nobs,
132
+ mean_x,
133
+ ssqdm_x,
134
+ compensation_add,
135
+ num_consecutive_same_value,
136
+ prev_value,
137
+ ) = add_var(
138
+ val,
139
+ nobs,
140
+ mean_x,
141
+ ssqdm_x,
142
+ compensation_add,
143
+ num_consecutive_same_value,
144
+ prev_value,
145
+ )
146
+
147
+ if nobs >= min_periods and nobs > ddof:
148
+ if nobs == 1 or num_consecutive_same_value >= nobs:
149
+ result = 0.0
150
+ else:
151
+ result = ssqdm_x / (nobs - ddof)
152
+ else:
153
+ result = np.nan
154
+
155
+ output[i] = result
156
+
157
+ if not is_monotonic_increasing_bounds:
158
+ nobs = 0
159
+ mean_x = 0.0
160
+ ssqdm_x = 0.0
161
+ compensation_remove = 0.0
162
+
163
+ # na_position is empty list since float64 can already hold nans
164
+ # Do list comprehension, since numba cannot figure out that na_pos is
165
+ # empty list of ints on its own
166
+ na_pos = [0 for i in range(0)]
167
+ return output, na_pos
168
+
169
+
170
+ @numba.jit(nopython=True, nogil=True, parallel=False)
171
+ def grouped_var(
172
+ values: np.ndarray,
173
+ result_dtype: np.dtype,
174
+ labels: npt.NDArray[np.intp],
175
+ ngroups: int,
176
+ min_periods: int,
177
+ ddof: int = 1,
178
+ ) -> tuple[np.ndarray, list[int]]:
179
+ N = len(labels)
180
+
181
+ nobs_arr = np.zeros(ngroups, dtype=np.int64)
182
+ comp_arr = np.zeros(ngroups, dtype=values.dtype)
183
+ consecutive_counts = np.zeros(ngroups, dtype=np.int64)
184
+ prev_vals = np.zeros(ngroups, dtype=values.dtype)
185
+ output = np.zeros(ngroups, dtype=result_dtype)
186
+ means = np.zeros(ngroups, dtype=result_dtype)
187
+
188
+ for i in range(N):
189
+ lab = labels[i]
190
+ val = values[i]
191
+
192
+ if lab < 0:
193
+ continue
194
+
195
+ mean_x = means[lab]
196
+ ssqdm_x = output[lab]
197
+ nobs = nobs_arr[lab]
198
+ compensation_add = comp_arr[lab]
199
+ num_consecutive_same_value = consecutive_counts[lab]
200
+ prev_value = prev_vals[lab]
201
+
202
+ (
203
+ nobs,
204
+ mean_x,
205
+ ssqdm_x,
206
+ compensation_add,
207
+ num_consecutive_same_value,
208
+ prev_value,
209
+ ) = add_var(
210
+ val,
211
+ nobs,
212
+ mean_x,
213
+ ssqdm_x,
214
+ compensation_add,
215
+ num_consecutive_same_value,
216
+ prev_value,
217
+ )
218
+
219
+ output[lab] = ssqdm_x
220
+ means[lab] = mean_x
221
+ consecutive_counts[lab] = num_consecutive_same_value
222
+ prev_vals[lab] = prev_value
223
+ comp_arr[lab] = compensation_add
224
+ nobs_arr[lab] = nobs
225
+
226
+ # Post-processing, replace vars that don't satisfy min_periods
227
+ for lab in range(ngroups):
228
+ nobs = nobs_arr[lab]
229
+ num_consecutive_same_value = consecutive_counts[lab]
230
+ ssqdm_x = output[lab]
231
+ if nobs >= min_periods and nobs > ddof:
232
+ if nobs == 1 or num_consecutive_same_value >= nobs:
233
+ result = 0.0
234
+ else:
235
+ result = ssqdm_x / (nobs - ddof)
236
+ else:
237
+ result = np.nan
238
+ output[lab] = result
239
+
240
+ # Second pass to get the std.dev
241
+ # na_position is empty list since float64 can already hold nans
242
+ # Do list comprehension, since numba cannot figure out that na_pos is
243
+ # empty list of ints on its own
244
+ na_pos = [0 for i in range(0)]
245
+ return output, na_pos
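add_var/remove_var perform an incremental (Welford-style) update of the running mean and the sum of squared deviations, with the same Kahan-style compensation as the sum kernels. A standalone sketch of the core recurrence, checked against statistics.variance; online_variance is an illustrative name, not pandas API:

import statistics

def online_variance(values, ddof=1):
    """Single-pass mean / sum-of-squared-deviations update, as in add_var."""
    nobs = 0
    mean = 0.0
    ssqdm = 0.0  # sum of squared deviations from the running mean
    for val in values:
        nobs += 1
        delta = val - mean
        mean += delta / nobs
        ssqdm += delta * (val - mean)  # uses the updated mean, like add_var
    return ssqdm / (nobs - ddof) if nobs > ddof else float("nan")

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(online_variance(data), statistics.variance(data))  # the two should agree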
venv/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc ADDED
Binary file (1.28 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc ADDED
Binary file (6.71 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc ADDED
Binary file (18.5 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc ADDED
Binary file (39.1 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc ADDED
Binary file (42 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc ADDED
Binary file (10.7 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc ADDED
Binary file (62.6 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc ADDED
Binary file (3.23 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc ADDED
Binary file (9.55 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc ADDED
Binary file (19.5 kB).
venv/lib/python3.10/site-packages/pandas/core/dtypes/api.py ADDED
@@ -0,0 +1,85 @@
from pandas.core.dtypes.common import (
    is_any_real_numeric_dtype,
    is_array_like,
    is_bool,
    is_bool_dtype,
    is_categorical_dtype,
    is_complex,
    is_complex_dtype,
    is_datetime64_any_dtype,
    is_datetime64_dtype,
    is_datetime64_ns_dtype,
    is_datetime64tz_dtype,
    is_dict_like,
    is_dtype_equal,
    is_extension_array_dtype,
    is_file_like,
    is_float,
    is_float_dtype,
    is_hashable,
    is_int64_dtype,
    is_integer,
    is_integer_dtype,
    is_interval,
    is_interval_dtype,
    is_iterator,
    is_list_like,
    is_named_tuple,
    is_number,
    is_numeric_dtype,
    is_object_dtype,
    is_period_dtype,
    is_re,
    is_re_compilable,
    is_scalar,
    is_signed_integer_dtype,
    is_sparse,
    is_string_dtype,
    is_timedelta64_dtype,
    is_timedelta64_ns_dtype,
    is_unsigned_integer_dtype,
    pandas_dtype,
)

__all__ = [
    "is_any_real_numeric_dtype",
    "is_array_like",
    "is_bool",
    "is_bool_dtype",
    "is_categorical_dtype",
    "is_complex",
    "is_complex_dtype",
    "is_datetime64_any_dtype",
    "is_datetime64_dtype",
    "is_datetime64_ns_dtype",
    "is_datetime64tz_dtype",
    "is_dict_like",
    "is_dtype_equal",
    "is_extension_array_dtype",
    "is_file_like",
    "is_float",
    "is_float_dtype",
    "is_hashable",
    "is_int64_dtype",
    "is_integer",
    "is_integer_dtype",
    "is_interval",
    "is_interval_dtype",
    "is_iterator",
    "is_list_like",
    "is_named_tuple",
    "is_number",
    "is_numeric_dtype",
    "is_object_dtype",
    "is_period_dtype",
    "is_re",
    "is_re_compilable",
    "is_scalar",
    "is_signed_integer_dtype",
    "is_sparse",
    "is_string_dtype",
    "is_timedelta64_dtype",
    "is_timedelta64_ns_dtype",
    "is_unsigned_integer_dtype",
    "pandas_dtype",
]
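These introspection helpers are re-exported for end users through pandas.api.types. A small usage example, assuming a current pandas release where these names are exposed there:

import pandas as pd
from pandas.api.types import is_integer_dtype, is_list_like

print(is_integer_dtype(pd.Series([1, 2, 3])))  # True
print(is_list_like("a string"))                # False: strings are not treated as list-like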
venv/lib/python3.10/site-packages/pandas/core/dtypes/astype.py ADDED
@@ -0,0 +1,301 @@
1
+ """
2
+ Functions for implementing 'astype' methods according to pandas conventions,
3
+ particularly ones that differ from numpy.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import inspect
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ overload,
11
+ )
12
+ import warnings
13
+
14
+ import numpy as np
15
+
16
+ from pandas._libs import lib
17
+ from pandas._libs.tslibs.timedeltas import array_to_timedelta64
18
+ from pandas.errors import IntCastingNaNError
19
+
20
+ from pandas.core.dtypes.common import (
21
+ is_object_dtype,
22
+ is_string_dtype,
23
+ pandas_dtype,
24
+ )
25
+ from pandas.core.dtypes.dtypes import (
26
+ ExtensionDtype,
27
+ NumpyEADtype,
28
+ )
29
+
30
+ if TYPE_CHECKING:
31
+ from pandas._typing import (
32
+ ArrayLike,
33
+ DtypeObj,
34
+ IgnoreRaise,
35
+ )
36
+
37
+ from pandas.core.arrays import ExtensionArray
38
+
39
+ _dtype_obj = np.dtype(object)
40
+
41
+
42
+ @overload
43
+ def _astype_nansafe(
44
+ arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...
45
+ ) -> np.ndarray:
46
+ ...
47
+
48
+
49
+ @overload
50
+ def _astype_nansafe(
51
+ arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...
52
+ ) -> ExtensionArray:
53
+ ...
54
+
55
+
56
+ def _astype_nansafe(
57
+ arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False
58
+ ) -> ArrayLike:
59
+ """
60
+ Cast the elements of an array to a given dtype a nan-safe manner.
61
+
62
+ Parameters
63
+ ----------
64
+ arr : ndarray
65
+ dtype : np.dtype or ExtensionDtype
66
+ copy : bool, default True
67
+ If False, a view will be attempted but may fail, if
68
+ e.g. the item sizes don't align.
69
+ skipna: bool, default False
70
+ Whether or not we should skip NaN when casting as a string-type.
71
+
72
+ Raises
73
+ ------
74
+ ValueError
75
+ The dtype was a datetime64/timedelta64 dtype, but it had no unit.
76
+ """
77
+
78
+ # dispatch on extension dtype if needed
79
+ if isinstance(dtype, ExtensionDtype):
80
+ return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
81
+
82
+ elif not isinstance(dtype, np.dtype): # pragma: no cover
83
+ raise ValueError("dtype must be np.dtype or ExtensionDtype")
84
+
85
+ if arr.dtype.kind in "mM":
86
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
87
+
88
+ arr = ensure_wrapped_if_datetimelike(arr)
89
+ res = arr.astype(dtype, copy=copy)
90
+ return np.asarray(res)
91
+
92
+ if issubclass(dtype.type, str):
93
+ shape = arr.shape
94
+ if arr.ndim > 1:
95
+ arr = arr.ravel()
96
+ return lib.ensure_string_array(
97
+ arr, skipna=skipna, convert_na_value=False
98
+ ).reshape(shape)
99
+
100
+ elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu":
101
+ return _astype_float_to_int_nansafe(arr, dtype, copy)
102
+
103
+ elif arr.dtype == object:
104
+ # if we have a datetime/timedelta array of objects
105
+ # then coerce to datetime64[ns] and use DatetimeArray.astype
106
+
107
+ if lib.is_np_dtype(dtype, "M"):
108
+ from pandas.core.arrays import DatetimeArray
109
+
110
+ dta = DatetimeArray._from_sequence(arr, dtype=dtype)
111
+ return dta._ndarray
112
+
113
+ elif lib.is_np_dtype(dtype, "m"):
114
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
115
+
116
+ # bc we know arr.dtype == object, this is equivalent to
117
+ # `np.asarray(to_timedelta(arr))`, but using a lower-level API that
118
+ # does not require a circular import.
119
+ tdvals = array_to_timedelta64(arr).view("m8[ns]")
120
+
121
+ tda = ensure_wrapped_if_datetimelike(tdvals)
122
+ return tda.astype(dtype, copy=False)._ndarray
123
+
124
+ if dtype.name in ("datetime64", "timedelta64"):
125
+ msg = (
126
+ f"The '{dtype.name}' dtype has no unit. Please pass in "
127
+ f"'{dtype.name}[ns]' instead."
128
+ )
129
+ raise ValueError(msg)
130
+
131
+ if copy or arr.dtype == object or dtype == object:
132
+ # Explicit copy, or required since NumPy can't view from / to object.
133
+ return arr.astype(dtype, copy=True)
134
+
135
+ return arr.astype(dtype, copy=copy)
136
+
137
+
138
+ def _astype_float_to_int_nansafe(
139
+ values: np.ndarray, dtype: np.dtype, copy: bool
140
+ ) -> np.ndarray:
141
+ """
142
+ astype with a check preventing converting NaN to an meaningless integer value.
143
+ """
144
+ if not np.isfinite(values).all():
145
+ raise IntCastingNaNError(
146
+ "Cannot convert non-finite values (NA or inf) to integer"
147
+ )
148
+ if dtype.kind == "u":
149
+ # GH#45151
150
+ if not (values >= 0).all():
151
+ raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}")
152
+ with warnings.catch_warnings():
153
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
154
+ return values.astype(dtype, copy=copy)
155
+
156
+
157
+ def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike:
158
+ """
159
+ Cast array (ndarray or ExtensionArray) to the new dtype.
160
+
161
+ Parameters
162
+ ----------
163
+ values : ndarray or ExtensionArray
164
+ dtype : dtype object
165
+ copy : bool, default False
166
+ copy if indicated
167
+
168
+ Returns
169
+ -------
170
+ ndarray or ExtensionArray
171
+ """
172
+ if values.dtype == dtype:
173
+ if copy:
174
+ return values.copy()
175
+ return values
176
+
177
+ if not isinstance(values, np.ndarray):
178
+ # i.e. ExtensionArray
179
+ values = values.astype(dtype, copy=copy)
180
+
181
+ else:
182
+ values = _astype_nansafe(values, dtype, copy=copy)
183
+
184
+ # in pandas we don't store numpy str dtypes, so convert to object
185
+ if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str):
186
+ values = np.array(values, dtype=object)
187
+
188
+ return values
189
+
190
+
191
+ def astype_array_safe(
192
+ values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise"
193
+ ) -> ArrayLike:
194
+ """
195
+ Cast array (ndarray or ExtensionArray) to the new dtype.
196
+
197
+ This basically is the implementation for DataFrame/Series.astype and
198
+ includes all custom logic for pandas (NaN-safety, converting str to object,
199
+ not allowing )
200
+
201
+ Parameters
202
+ ----------
203
+ values : ndarray or ExtensionArray
204
+ dtype : str, dtype convertible
205
+ copy : bool, default False
206
+ copy if indicated
207
+ errors : str, {'raise', 'ignore'}, default 'raise'
208
+ - ``raise`` : allow exceptions to be raised
209
+ - ``ignore`` : suppress exceptions. On error return original object
210
+
211
+ Returns
212
+ -------
213
+ ndarray or ExtensionArray
214
+ """
215
+ errors_legal_values = ("raise", "ignore")
216
+
217
+ if errors not in errors_legal_values:
218
+ invalid_arg = (
219
+ "Expected value of kwarg 'errors' to be one of "
220
+ f"{list(errors_legal_values)}. Supplied value is '{errors}'"
221
+ )
222
+ raise ValueError(invalid_arg)
223
+
224
+ if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):
225
+ msg = (
226
+ f"Expected an instance of {dtype.__name__}, "
227
+ "but got the class instead. Try instantiating 'dtype'."
228
+ )
229
+ raise TypeError(msg)
230
+
231
+ dtype = pandas_dtype(dtype)
232
+ if isinstance(dtype, NumpyEADtype):
233
+ # Ensure we don't end up with a NumpyExtensionArray
234
+ dtype = dtype.numpy_dtype
235
+
236
+ try:
237
+ new_values = astype_array(values, dtype, copy=copy)
238
+ except (ValueError, TypeError):
239
+ # e.g. _astype_nansafe can fail on object-dtype of strings
240
+ # trying to convert to float
241
+ if errors == "ignore":
242
+ new_values = values
243
+ else:
244
+ raise
245
+
246
+ return new_values
247
+
248
+
249
+ def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool:
250
+ """Checks if astype avoided copying the data.
251
+
252
+ Parameters
253
+ ----------
254
+ dtype : Original dtype
255
+ new_dtype : target dtype
256
+
257
+ Returns
258
+ -------
259
+ True if new data is a view or not guaranteed to be a copy, False otherwise
260
+ """
261
+ if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype):
262
+ new_dtype, dtype = dtype, new_dtype
263
+
264
+ if dtype == new_dtype:
265
+ return True
266
+
267
+ elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype):
268
+ # Only equal numpy dtypes avoid a copy
269
+ return False
270
+
271
+ elif is_string_dtype(dtype) and is_string_dtype(new_dtype):
272
+ # Potentially! a view when converting from object to string
273
+ return True
274
+
275
+ elif is_object_dtype(dtype) and new_dtype.kind == "O":
276
+ # When the underlying array has dtype object, we don't have to make a copy
277
+ return True
278
+
279
+ elif dtype.kind in "mM" and new_dtype.kind in "mM":
280
+ dtype = getattr(dtype, "numpy_dtype", dtype)
281
+ new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype)
282
+ return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None)
283
+
284
+ numpy_dtype = getattr(dtype, "numpy_dtype", None)
285
+ new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None)
286
+
287
+ if numpy_dtype is None and isinstance(dtype, np.dtype):
288
+ numpy_dtype = dtype
289
+
290
+ if new_numpy_dtype is None and isinstance(new_dtype, np.dtype):
291
+ new_numpy_dtype = new_dtype
292
+
293
+ if numpy_dtype is not None and new_numpy_dtype is not None:
294
+ # if both have NumPy dtype or one of them is a numpy dtype
295
+ # they are only a view when the numpy dtypes are equal, e.g.
296
+ # int64 -> Int64 or int64[pyarrow]
297
+ # int64 -> Int32 copies
298
+ return numpy_dtype == new_numpy_dtype
299
+
300
+ # Assume this is a view since we don't know for sure if a copy was made
301
+ return True
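astype_array_safe backs DataFrame/Series.astype, including the errors='raise'/'ignore' handling documented above. A minimal public-API sketch; depending on the pandas version, errors="ignore" may emit a deprecation warning:

import pandas as pd

s = pd.Series(["1", "2", "not a number"], dtype="object")
# A failing cast with errors="ignore" returns the original values,
# mirroring the except branch in astype_array_safe.
print(s.astype("int64", errors="ignore"))
print(s.iloc[:2].astype("int64"))  # succeeds: dtype int64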
venv/lib/python3.10/site-packages/pandas/core/dtypes/base.py ADDED
@@ -0,0 +1,583 @@
1
+ """
2
+ Extend pandas with custom array types.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Any,
9
+ TypeVar,
10
+ cast,
11
+ overload,
12
+ )
13
+
14
+ import numpy as np
15
+
16
+ from pandas._libs import missing as libmissing
17
+ from pandas._libs.hashtable import object_hash
18
+ from pandas._libs.properties import cache_readonly
19
+ from pandas.errors import AbstractMethodError
20
+
21
+ from pandas.core.dtypes.generic import (
22
+ ABCDataFrame,
23
+ ABCIndex,
24
+ ABCSeries,
25
+ )
26
+
27
+ if TYPE_CHECKING:
28
+ from pandas._typing import (
29
+ DtypeObj,
30
+ Self,
31
+ Shape,
32
+ npt,
33
+ type_t,
34
+ )
35
+
36
+ from pandas import Index
37
+ from pandas.core.arrays import ExtensionArray
38
+
39
+ # To parameterize on same ExtensionDtype
40
+ ExtensionDtypeT = TypeVar("ExtensionDtypeT", bound="ExtensionDtype")
41
+
42
+
43
+ class ExtensionDtype:
44
+ """
45
+ A custom data type, to be paired with an ExtensionArray.
46
+
47
+ See Also
48
+ --------
49
+ extensions.register_extension_dtype: Register an ExtensionType
50
+ with pandas as class decorator.
51
+ extensions.ExtensionArray: Abstract base class for custom 1-D array types.
52
+
53
+ Notes
54
+ -----
55
+ The interface includes the following abstract methods that must
56
+ be implemented by subclasses:
57
+
58
+ * type
59
+ * name
60
+ * construct_array_type
61
+
62
+ The following attributes and methods influence the behavior of the dtype in
63
+ pandas operations
64
+
65
+ * _is_numeric
66
+ * _is_boolean
67
+ * _get_common_dtype
68
+
69
+ The `na_value` class attribute can be used to set the default NA value
70
+ for this type. :attr:`numpy.nan` is used by default.
71
+
72
+ ExtensionDtypes are required to be hashable. The base class provides
73
+ a default implementation, which relies on the ``_metadata`` class
74
+ attribute. ``_metadata`` should be a tuple containing the strings
75
+ that define your data type. For example, with ``PeriodDtype`` that's
76
+ the ``freq`` attribute.
77
+
78
+ **If you have a parametrized dtype you should set the ``_metadata``
79
+ class property**.
80
+
81
+ Ideally, the attributes in ``_metadata`` will match the
82
+ parameters to your ``ExtensionDtype.__init__`` (if any). If any of
83
+ the attributes in ``_metadata`` don't implement the standard
84
+ ``__eq__`` or ``__hash__``, the default implementations here will not
85
+ work.
86
+
87
+ Examples
88
+ --------
89
+
90
+ For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
91
+ can be implemented: this method receives a pyarrow Array or ChunkedArray
92
+ as only argument and is expected to return the appropriate pandas
93
+ ExtensionArray for this dtype and the passed values:
94
+
95
+ >>> import pyarrow
96
+ >>> from pandas.api.extensions import ExtensionArray
97
+ >>> class ExtensionDtype:
98
+ ... def __from_arrow__(
99
+ ... self,
100
+ ... array: pyarrow.Array | pyarrow.ChunkedArray
101
+ ... ) -> ExtensionArray:
102
+ ... ...
103
+
104
+ This class does not inherit from 'abc.ABCMeta' for performance reasons.
105
+ Methods and properties required by the interface raise
106
+ ``pandas.errors.AbstractMethodError`` and no ``register`` method is
107
+ provided for registering virtual subclasses.
108
+ """
109
+
110
+ _metadata: tuple[str, ...] = ()
111
+
112
+ def __str__(self) -> str:
113
+ return self.name
114
+
115
+ def __eq__(self, other: object) -> bool:
116
+ """
117
+ Check whether 'other' is equal to self.
118
+
119
+ By default, 'other' is considered equal if either
120
+
121
+ * it's a string matching 'self.name'.
122
+ * it's an instance of this type and all of the attributes
123
+ in ``self._metadata`` are equal between `self` and `other`.
124
+
125
+ Parameters
126
+ ----------
127
+ other : Any
128
+
129
+ Returns
130
+ -------
131
+ bool
132
+ """
133
+ if isinstance(other, str):
134
+ try:
135
+ other = self.construct_from_string(other)
136
+ except TypeError:
137
+ return False
138
+ if isinstance(other, type(self)):
139
+ return all(
140
+ getattr(self, attr) == getattr(other, attr) for attr in self._metadata
141
+ )
142
+ return False
143
+
144
+ def __hash__(self) -> int:
145
+ # for python>=3.10, different nan objects have different hashes
146
+ # we need to avoid that and thus use hash function with old behavior
147
+ return object_hash(tuple(getattr(self, attr) for attr in self._metadata))
148
+
149
+ def __ne__(self, other: object) -> bool:
150
+ return not self.__eq__(other)
151
+
152
+ @property
153
+ def na_value(self) -> object:
154
+ """
155
+ Default NA value to use for this type.
156
+
157
+ This is used in e.g. ExtensionArray.take. This should be the
158
+ user-facing "boxed" version of the NA value, not the physical NA value
159
+ for storage. e.g. for JSONArray, this is an empty dictionary.
160
+ """
161
+ return np.nan
162
+
163
+ @property
164
+ def type(self) -> type_t[Any]:
165
+ """
166
+ The scalar type for the array, e.g. ``int``
167
+
168
+ It's expected ``ExtensionArray[item]`` returns an instance
169
+ of ``ExtensionDtype.type`` for scalar ``item``, assuming
170
+ that value is valid (not NA). NA values do not need to be
171
+ instances of `type`.
172
+ """
173
+ raise AbstractMethodError(self)
174
+
175
+ @property
176
+ def kind(self) -> str:
177
+ """
178
+ A character code (one of 'biufcmMOSUV'), default 'O'
179
+
180
+ This should match the NumPy dtype used when the array is
181
+ converted to an ndarray, which is probably 'O' for object if
182
+ the extension type cannot be represented as a built-in NumPy
183
+ type.
184
+
185
+ See Also
186
+ --------
187
+ numpy.dtype.kind
188
+ """
189
+ return "O"
190
+
191
+ @property
192
+ def name(self) -> str:
193
+ """
194
+ A string identifying the data type.
195
+
196
+ Will be used for display in, e.g. ``Series.dtype``
197
+ """
198
+ raise AbstractMethodError(self)
199
+
200
+ @property
201
+ def names(self) -> list[str] | None:
202
+ """
203
+ Ordered list of field names, or None if there are no fields.
204
+
205
+ This is for compatibility with NumPy arrays, and may be removed in the
206
+ future.
207
+ """
208
+ return None
209
+
210
+ @classmethod
211
+ def construct_array_type(cls) -> type_t[ExtensionArray]:
212
+ """
213
+ Return the array type associated with this dtype.
214
+
215
+ Returns
216
+ -------
217
+ type
218
+ """
219
+ raise AbstractMethodError(cls)
220
+
221
+ def empty(self, shape: Shape) -> ExtensionArray:
222
+ """
223
+ Construct an ExtensionArray of this dtype with the given shape.
224
+
225
+ Analogous to numpy.empty.
226
+
227
+ Parameters
228
+ ----------
229
+ shape : int or tuple[int]
230
+
231
+ Returns
232
+ -------
233
+ ExtensionArray
234
+ """
235
+ cls = self.construct_array_type()
236
+ return cls._empty(shape, dtype=self)
237
+
238
+ @classmethod
239
+ def construct_from_string(cls, string: str) -> Self:
240
+ r"""
241
+ Construct this type from a string.
242
+
243
+ This is useful mainly for data types that accept parameters.
244
+ For example, a period dtype accepts a frequency parameter that
245
+ can be set as ``period[h]`` (where H means hourly frequency).
246
+
247
+ By default, in the abstract class, just the name of the type is
248
+ expected. But subclasses can overwrite this method to accept
249
+ parameters.
250
+
251
+ Parameters
252
+ ----------
253
+ string : str
254
+ The name of the type, for example ``category``.
255
+
256
+ Returns
257
+ -------
258
+ ExtensionDtype
259
+ Instance of the dtype.
260
+
261
+ Raises
262
+ ------
263
+ TypeError
264
+ If a class cannot be constructed from this 'string'.
265
+
266
+ Examples
267
+ --------
268
+ For extension dtypes with arguments the following may be an
269
+ adequate implementation.
270
+
271
+ >>> import re
272
+ >>> @classmethod
273
+ ... def construct_from_string(cls, string):
274
+ ... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
275
+ ... match = pattern.match(string)
276
+ ... if match:
277
+ ... return cls(**match.groupdict())
278
+ ... else:
279
+ ... raise TypeError(
280
+ ... f"Cannot construct a '{cls.__name__}' from '{string}'"
281
+ ... )
282
+ """
283
+ if not isinstance(string, str):
284
+ raise TypeError(
285
+ f"'construct_from_string' expects a string, got {type(string)}"
286
+ )
287
+ # error: Non-overlapping equality check (left operand type: "str", right
288
+ # operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
289
+ assert isinstance(cls.name, str), (cls, type(cls.name))
290
+ if string != cls.name:
291
+ raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
292
+ return cls()
293
+
294
+ @classmethod
295
+ def is_dtype(cls, dtype: object) -> bool:
296
+ """
297
+ Check if we match 'dtype'.
298
+
299
+ Parameters
300
+ ----------
301
+ dtype : object
302
+ The object to check.
303
+
304
+ Returns
305
+ -------
306
+ bool
307
+
308
+ Notes
309
+ -----
310
+ The default implementation is True if
311
+
312
+ 1. ``cls.construct_from_string(dtype)`` is an instance
313
+ of ``cls``.
314
+ 2. ``dtype`` is an object and is an instance of ``cls``
315
+ 3. ``dtype`` has a ``dtype`` attribute, and any of the above
316
+ conditions is true for ``dtype.dtype``.
317
+ """
318
+ dtype = getattr(dtype, "dtype", dtype)
319
+
320
+ if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)):
321
+ # https://github.com/pandas-dev/pandas/issues/22960
322
+ # avoid passing data to `construct_from_string`. This could
323
+ # cause a FutureWarning from numpy about failing elementwise
324
+ # comparison from, e.g., comparing DataFrame == 'category'.
325
+ return False
326
+ elif dtype is None:
327
+ return False
328
+ elif isinstance(dtype, cls):
329
+ return True
330
+ if isinstance(dtype, str):
331
+ try:
332
+ return cls.construct_from_string(dtype) is not None
333
+ except TypeError:
334
+ return False
335
+ return False
336
+
337
+ @property
338
+ def _is_numeric(self) -> bool:
339
+ """
340
+ Whether columns with this dtype should be considered numeric.
341
+
342
+ By default ExtensionDtypes are assumed to be non-numeric.
343
+ They'll be excluded from operations that exclude non-numeric
344
+ columns, like (groupby) reductions, plotting, etc.
345
+ """
346
+ return False
347
+
348
+ @property
349
+ def _is_boolean(self) -> bool:
350
+ """
351
+ Whether this dtype should be considered boolean.
352
+
353
+ By default, ExtensionDtypes are assumed to be non-numeric.
354
+ Setting this to True will affect the behavior of several places,
355
+ e.g.
356
+
357
+ * is_bool
358
+ * boolean indexing
359
+
360
+ Returns
361
+ -------
362
+ bool
363
+ """
364
+ return False
365
+
366
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
367
+ """
368
+ Return the common dtype, if one exists.
369
+
370
+ Used in `find_common_type` implementation. This is for example used
371
+ to determine the resulting dtype in a concat operation.
372
+
373
+ If no common dtype exists, return None (which gives the other dtypes
374
+ the chance to determine a common dtype). If all dtypes in the list
375
+ return None, then the common dtype will be "object" dtype (this means
376
+ it is never needed to return "object" dtype from this method itself).
377
+
378
+ Parameters
379
+ ----------
380
+ dtypes : list of dtypes
381
+ The dtypes for which to determine a common dtype. This is a list
382
+ of np.dtype or ExtensionDtype instances.
383
+
384
+ Returns
385
+ -------
386
+ Common dtype (np.dtype or ExtensionDtype) or None
387
+ """
388
+ if len(set(dtypes)) == 1:
389
+ # only itself
390
+ return self
391
+ else:
392
+ return None
393
+
394
+ @property
395
+ def _can_hold_na(self) -> bool:
396
+ """
397
+ Can arrays of this dtype hold NA values?
398
+ """
399
+ return True
400
+
401
+ @property
402
+ def _is_immutable(self) -> bool:
403
+ """
404
+ Can arrays with this dtype be modified with __setitem__? If not, return
405
+ True.
406
+
407
+ Immutable arrays are expected to raise TypeError on __setitem__ calls.
408
+ """
409
+ return False
410
+
411
+ @cache_readonly
412
+ def index_class(self) -> type_t[Index]:
413
+ """
414
+ The Index subclass to return from Index.__new__ when this dtype is
415
+ encountered.
416
+ """
417
+ from pandas import Index
418
+
419
+ return Index
420
+
421
+ @property
422
+ def _supports_2d(self) -> bool:
423
+ """
424
+ Do ExtensionArrays with this dtype support 2D arrays?
425
+
426
+ Historically ExtensionArrays were limited to 1D. By returning True here,
427
+ authors can indicate that their arrays support 2D instances. This can
428
+ improve performance in some cases, particularly operations with `axis=1`.
429
+
430
+ Arrays that support 2D values should:
431
+
432
+ - implement Array.reshape
433
+ - subclass the Dim2CompatTests in tests.extension.base
434
+ - _concat_same_type should support `axis` keyword
435
+ - _reduce and reductions should support `axis` keyword
436
+ """
437
+ return False
438
+
439
+ @property
440
+ def _can_fast_transpose(self) -> bool:
441
+ """
442
+ Is transposing an array with this dtype zero-copy?
443
+
444
+ Only relevant for cases where _supports_2d is True.
445
+ """
446
+ return False
447
+
448
+
449
+ class StorageExtensionDtype(ExtensionDtype):
450
+ """ExtensionDtype that may be backed by more than one implementation."""
451
+
452
+ name: str
453
+ _metadata = ("storage",)
454
+
455
+ def __init__(self, storage: str | None = None) -> None:
456
+ self.storage = storage
457
+
458
+ def __repr__(self) -> str:
459
+ return f"{self.name}[{self.storage}]"
460
+
461
+ def __str__(self) -> str:
462
+ return self.name
463
+
464
+ def __eq__(self, other: object) -> bool:
465
+ if isinstance(other, str) and other == self.name:
466
+ return True
467
+ return super().__eq__(other)
468
+
469
+ def __hash__(self) -> int:
470
+ # custom __eq__ so have to override __hash__
471
+ return super().__hash__()
472
+
473
+ @property
474
+ def na_value(self) -> libmissing.NAType:
475
+ return libmissing.NA
476
+
477
+
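A rough sketch of how a backend-parametrized dtype could build on ``StorageExtensionDtype``; the ``ListDtype`` name and its two storage options are invented for illustration:

from pandas.core.dtypes.base import StorageExtensionDtype

class ListDtype(StorageExtensionDtype):
    # Hypothetical dtype with two interchangeable storage backends.
    name = "list"

    def __init__(self, storage=None):
        if storage is None:
            storage = "python"
        if storage not in ("python", "pyarrow"):
            raise ValueError(f"storage must be 'python' or 'pyarrow', got '{storage}'")
        super().__init__(storage)

repr(ListDtype("pyarrow"))     # 'list[pyarrow]'
str(ListDtype("pyarrow"))      # 'list'
ListDtype("python") == "list"  # True: comparing against the bare name matches any storage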
478
+ def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
479
+ """
480
+ Register an ExtensionType with pandas as class decorator.
481
+
482
+ This enables operations like ``.astype(name)`` for the name
483
+ of the ExtensionDtype.
484
+
485
+ Returns
486
+ -------
487
+ callable
488
+ A class decorator.
489
+
490
+ Examples
491
+ --------
492
+ >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype
493
+ >>> @register_extension_dtype
494
+ ... class MyExtensionDtype(ExtensionDtype):
495
+ ... name = "myextension"
496
+ """
497
+ _registry.register(cls)
498
+ return cls
499
+
500
+
501
+ class Registry:
502
+ """
503
+ Registry for dtype inference.
504
+
505
+ The registry allows one to map a string repr of an extension
506
+ dtype to an extension dtype. The string alias can be used in several
507
+ places, including
508
+
509
+ * Series and Index constructors
510
+ * :meth:`pandas.array`
511
+ * :meth:`pandas.Series.astype`
512
+
513
+ Multiple extension types can be registered.
514
+ These are tried in order.
515
+ """
516
+
517
+ def __init__(self) -> None:
518
+ self.dtypes: list[type_t[ExtensionDtype]] = []
519
+
520
+ def register(self, dtype: type_t[ExtensionDtype]) -> None:
521
+ """
522
+ Parameters
523
+ ----------
524
+ dtype : ExtensionDtype class
525
+ """
526
+ if not issubclass(dtype, ExtensionDtype):
527
+ raise ValueError("can only register pandas extension dtypes")
528
+
529
+ self.dtypes.append(dtype)
530
+
531
+ @overload
532
+ def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
533
+ ...
534
+
535
+ @overload
536
+ def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT:
537
+ ...
538
+
539
+ @overload
540
+ def find(self, dtype: str) -> ExtensionDtype | None:
541
+ ...
542
+
543
+ @overload
544
+ def find(
545
+ self, dtype: npt.DTypeLike
546
+ ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
547
+ ...
548
+
549
+ def find(
550
+ self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike
551
+ ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
552
+ """
553
+ Parameters
554
+ ----------
555
+ dtype : ExtensionDtype class or instance or str or numpy dtype or python type
556
+
557
+ Returns
558
+ -------
559
+ return the first matching dtype, otherwise return None
560
+ """
561
+ if not isinstance(dtype, str):
562
+ dtype_type: type_t
563
+ if not isinstance(dtype, type):
564
+ dtype_type = type(dtype)
565
+ else:
566
+ dtype_type = dtype
567
+ if issubclass(dtype_type, ExtensionDtype):
568
+ # cast needed here as mypy doesn't know we have figured
569
+ # out it is an ExtensionDtype or type_t[ExtensionDtype]
570
+ return cast("ExtensionDtype | type_t[ExtensionDtype]", dtype)
571
+
572
+ return None
573
+
574
+ for dtype_type in self.dtypes:
575
+ try:
576
+ return dtype_type.construct_from_string(dtype)
577
+ except TypeError:
578
+ pass
579
+
580
+ return None
581
+
582
+
583
+ _registry = Registry()
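Putting the pieces above together, registering a dtype makes its string alias resolvable through the registry, which is what ``pandas_dtype`` and ``.astype(name)`` consult; ``MyDtype`` is a made-up name for this sketch, and the ``_registry`` import is from the private module shown above:

import pandas as pd
from pandas.api.extensions import ExtensionDtype, register_extension_dtype
from pandas.core.dtypes.base import _registry

@register_extension_dtype
class MyDtype(ExtensionDtype):
    # Hypothetical dtype; a usable one would also implement construct_array_type.
    name = "mydtype"

_registry.find("mydtype")             # MyDtype()
_registry.find("no-such-alias")       # None
pd.api.types.pandas_dtype("mydtype")  # MyDtype(), resolved via the registry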
venv/lib/python3.10/site-packages/pandas/core/dtypes/common.py ADDED
@@ -0,0 +1,1748 @@
1
+ """
2
+ Common type operations.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Any,
9
+ Callable,
10
+ )
11
+ import warnings
12
+
13
+ import numpy as np
14
+
15
+ from pandas._libs import (
16
+ Interval,
17
+ Period,
18
+ algos,
19
+ lib,
20
+ )
21
+ from pandas._libs.tslibs import conversion
22
+ from pandas.util._exceptions import find_stack_level
23
+
24
+ from pandas.core.dtypes.base import _registry as registry
25
+ from pandas.core.dtypes.dtypes import (
26
+ CategoricalDtype,
27
+ DatetimeTZDtype,
28
+ ExtensionDtype,
29
+ IntervalDtype,
30
+ PeriodDtype,
31
+ SparseDtype,
32
+ )
33
+ from pandas.core.dtypes.generic import ABCIndex
34
+ from pandas.core.dtypes.inference import (
35
+ is_array_like,
36
+ is_bool,
37
+ is_complex,
38
+ is_dataclass,
39
+ is_decimal,
40
+ is_dict_like,
41
+ is_file_like,
42
+ is_float,
43
+ is_hashable,
44
+ is_integer,
45
+ is_interval,
46
+ is_iterator,
47
+ is_list_like,
48
+ is_named_tuple,
49
+ is_nested_list_like,
50
+ is_number,
51
+ is_re,
52
+ is_re_compilable,
53
+ is_scalar,
54
+ is_sequence,
55
+ )
56
+
57
+ if TYPE_CHECKING:
58
+ from pandas._typing import (
59
+ ArrayLike,
60
+ DtypeObj,
61
+ )
62
+
63
+ DT64NS_DTYPE = conversion.DT64NS_DTYPE
64
+ TD64NS_DTYPE = conversion.TD64NS_DTYPE
65
+ INT64_DTYPE = np.dtype(np.int64)
66
+
67
+ # oh the troubles to reduce import time
68
+ _is_scipy_sparse = None
69
+
70
+ ensure_float64 = algos.ensure_float64
71
+ ensure_int64 = algos.ensure_int64
72
+ ensure_int32 = algos.ensure_int32
73
+ ensure_int16 = algos.ensure_int16
74
+ ensure_int8 = algos.ensure_int8
75
+ ensure_platform_int = algos.ensure_platform_int
76
+ ensure_object = algos.ensure_object
77
+ ensure_uint64 = algos.ensure_uint64
78
+
79
+
80
+ def ensure_str(value: bytes | Any) -> str:
81
+ """
82
+ Ensure that bytes and non-strings get converted into ``str`` objects.
83
+ """
84
+ if isinstance(value, bytes):
85
+ value = value.decode("utf-8")
86
+ elif not isinstance(value, str):
87
+ value = str(value)
88
+ return value
89
+
90
+
91
+ def ensure_python_int(value: int | np.integer) -> int:
92
+ """
93
+ Ensure that a value is a python int.
94
+
95
+ Parameters
96
+ ----------
97
+ value: int or numpy.integer
98
+
99
+ Returns
100
+ -------
101
+ int
102
+
103
+ Raises
104
+ ------
105
+ TypeError: if the value isn't an int or can't be converted to one.
106
+ """
107
+ if not (is_integer(value) or is_float(value)):
108
+ if not is_scalar(value):
109
+ raise TypeError(
110
+ f"Value needs to be a scalar value, was type {type(value).__name__}"
111
+ )
112
+ raise TypeError(f"Wrong type {type(value)} for value {value}")
113
+ try:
114
+ new_value = int(value)
115
+ assert new_value == value
116
+ except (TypeError, ValueError, AssertionError) as err:
117
+ raise TypeError(f"Wrong type {type(value)} for value {value}") from err
118
+ return new_value
119
+
120
+
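A few concrete cases for the coercion above (``ensure_python_int`` lives in this private module, so the import is shown for illustration only):

import numpy as np
from pandas.core.dtypes.common import ensure_python_int

ensure_python_int(np.int32(5))  # 5 as a built-in int
ensure_python_int(3.0)          # 3: floats are accepted only when int(value) == value
ensure_python_int(3.5)          # raises TypeError, since int(3.5) != 3.5
ensure_python_int("3")          # raises TypeError: not an int/float scalar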
121
+ def classes(*klasses) -> Callable:
122
+ """Evaluate if the tipo is a subclass of the klasses."""
123
+ return lambda tipo: issubclass(tipo, klasses)
124
+
125
+
126
+ def _classes_and_not_datetimelike(*klasses) -> Callable:
127
+ """
128
+ Evaluate if the tipo is a subclass of the klasses
129
+ and not a datetimelike.
130
+ """
131
+ return lambda tipo: (
132
+ issubclass(tipo, klasses)
133
+ and not issubclass(tipo, (np.datetime64, np.timedelta64))
134
+ )
135
+
136
+
137
+ def is_object_dtype(arr_or_dtype) -> bool:
138
+ """
139
+ Check whether an array-like or dtype is of the object dtype.
140
+
141
+ Parameters
142
+ ----------
143
+ arr_or_dtype : array-like or dtype
144
+ The array-like or dtype to check.
145
+
146
+ Returns
147
+ -------
148
+ boolean
149
+ Whether or not the array-like or dtype is of the object dtype.
150
+
151
+ Examples
152
+ --------
153
+ >>> from pandas.api.types import is_object_dtype
154
+ >>> is_object_dtype(object)
155
+ True
156
+ >>> is_object_dtype(int)
157
+ False
158
+ >>> is_object_dtype(np.array([], dtype=object))
159
+ True
160
+ >>> is_object_dtype(np.array([], dtype=int))
161
+ False
162
+ >>> is_object_dtype([1, 2, 3])
163
+ False
164
+ """
165
+ return _is_dtype_type(arr_or_dtype, classes(np.object_))
166
+
167
+
168
+ def is_sparse(arr) -> bool:
169
+ """
170
+ Check whether an array-like is a 1-D pandas sparse array.
171
+
172
+ .. deprecated:: 2.1.0
173
+ Use isinstance(dtype, pd.SparseDtype) instead.
174
+
175
+ Check that the one-dimensional array-like is a pandas sparse array.
176
+ Returns True if it is a pandas sparse array, not another type of
177
+ sparse array.
178
+
179
+ Parameters
180
+ ----------
181
+ arr : array-like
182
+ Array-like to check.
183
+
184
+ Returns
185
+ -------
186
+ bool
187
+ Whether or not the array-like is a pandas sparse array.
188
+
189
+ Examples
190
+ --------
191
+ Returns `True` if the parameter is a 1-D pandas sparse array.
192
+
193
+ >>> from pandas.api.types import is_sparse
194
+ >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0]))
195
+ True
196
+ >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0])))
197
+ True
198
+
199
+ Returns `False` if the parameter is not sparse.
200
+
201
+ >>> is_sparse(np.array([0, 0, 1, 0]))
202
+ False
203
+ >>> is_sparse(pd.Series([0, 1, 0, 0]))
204
+ False
205
+
206
+ Returns `False` if the parameter is not a pandas sparse array.
207
+
208
+ >>> from scipy.sparse import bsr_matrix
209
+ >>> is_sparse(bsr_matrix([0, 1, 0, 0]))
210
+ False
211
+
212
+ Returns `False` if the parameter has more than one dimension.
213
+ """
214
+ warnings.warn(
215
+ "is_sparse is deprecated and will be removed in a future "
216
+ "version. Check `isinstance(dtype, pd.SparseDtype)` instead.",
217
+ DeprecationWarning,
218
+ stacklevel=2,
219
+ )
220
+
221
+ dtype = getattr(arr, "dtype", arr)
222
+ return isinstance(dtype, SparseDtype)
223
+
224
+
225
+ def is_scipy_sparse(arr) -> bool:
226
+ """
227
+ Check whether an array-like is a scipy.sparse.spmatrix instance.
228
+
229
+ Parameters
230
+ ----------
231
+ arr : array-like
232
+ The array-like to check.
233
+
234
+ Returns
235
+ -------
236
+ boolean
237
+ Whether or not the array-like is a scipy.sparse.spmatrix instance.
238
+
239
+ Notes
240
+ -----
241
+ If scipy is not installed, this function will always return False.
242
+
243
+ Examples
244
+ --------
245
+ >>> from scipy.sparse import bsr_matrix
246
+ >>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
247
+ True
248
+ >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3]))
249
+ False
250
+ """
251
+ global _is_scipy_sparse
252
+
253
+ if _is_scipy_sparse is None: # pylint: disable=used-before-assignment
254
+ try:
255
+ from scipy.sparse import issparse as _is_scipy_sparse
256
+ except ImportError:
257
+ _is_scipy_sparse = lambda _: False
258
+
259
+ assert _is_scipy_sparse is not None
260
+ return _is_scipy_sparse(arr)
261
+
262
+
263
+ def is_datetime64_dtype(arr_or_dtype) -> bool:
264
+ """
265
+ Check whether an array-like or dtype is of the datetime64 dtype.
266
+
267
+ Parameters
268
+ ----------
269
+ arr_or_dtype : array-like or dtype
270
+ The array-like or dtype to check.
271
+
272
+ Returns
273
+ -------
274
+ boolean
275
+ Whether or not the array-like or dtype is of the datetime64 dtype.
276
+
277
+ Examples
278
+ --------
279
+ >>> from pandas.api.types import is_datetime64_dtype
280
+ >>> is_datetime64_dtype(object)
281
+ False
282
+ >>> is_datetime64_dtype(np.datetime64)
283
+ True
284
+ >>> is_datetime64_dtype(np.array([], dtype=int))
285
+ False
286
+ >>> is_datetime64_dtype(np.array([], dtype=np.datetime64))
287
+ True
288
+ >>> is_datetime64_dtype([1, 2, 3])
289
+ False
290
+ """
291
+ if isinstance(arr_or_dtype, np.dtype):
292
+ # GH#33400 fastpath for dtype object
293
+ return arr_or_dtype.kind == "M"
294
+ return _is_dtype_type(arr_or_dtype, classes(np.datetime64))
295
+
296
+
297
+ def is_datetime64tz_dtype(arr_or_dtype) -> bool:
298
+ """
299
+ Check whether an array-like or dtype is of a DatetimeTZDtype dtype.
300
+
301
+ .. deprecated:: 2.1.0
302
+ Use isinstance(dtype, pd.DatetimeTZDtype) instead.
303
+
304
+ Parameters
305
+ ----------
306
+ arr_or_dtype : array-like or dtype
307
+ The array-like or dtype to check.
308
+
309
+ Returns
310
+ -------
311
+ boolean
312
+ Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.
313
+
314
+ Examples
315
+ --------
316
+ >>> from pandas.api.types import is_datetime64tz_dtype
317
+ >>> is_datetime64tz_dtype(object)
318
+ False
319
+ >>> is_datetime64tz_dtype([1, 2, 3])
320
+ False
321
+ >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive
322
+ False
323
+ >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
324
+ True
325
+
326
+ >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
327
+ >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
328
+ >>> s = pd.Series([], dtype=dtype)
329
+ >>> is_datetime64tz_dtype(dtype)
330
+ True
331
+ >>> is_datetime64tz_dtype(s)
332
+ True
333
+ """
334
+ # GH#52607
335
+ warnings.warn(
336
+ "is_datetime64tz_dtype is deprecated and will be removed in a future "
337
+ "version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.",
338
+ DeprecationWarning,
339
+ stacklevel=2,
340
+ )
341
+ if isinstance(arr_or_dtype, DatetimeTZDtype):
342
+ # GH#33400 fastpath for dtype object
343
+ # GH 34986
344
+ return True
345
+
346
+ if arr_or_dtype is None:
347
+ return False
348
+ return DatetimeTZDtype.is_dtype(arr_or_dtype)
349
+
350
+
351
+ def is_timedelta64_dtype(arr_or_dtype) -> bool:
352
+ """
353
+ Check whether an array-like or dtype is of the timedelta64 dtype.
354
+
355
+ Parameters
356
+ ----------
357
+ arr_or_dtype : array-like or dtype
358
+ The array-like or dtype to check.
359
+
360
+ Returns
361
+ -------
362
+ boolean
363
+ Whether or not the array-like or dtype is of the timedelta64 dtype.
364
+
365
+ Examples
366
+ --------
367
+ >>> from pandas.core.dtypes.common import is_timedelta64_dtype
368
+ >>> is_timedelta64_dtype(object)
369
+ False
370
+ >>> is_timedelta64_dtype(np.timedelta64)
371
+ True
372
+ >>> is_timedelta64_dtype([1, 2, 3])
373
+ False
374
+ >>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
375
+ True
376
+ >>> is_timedelta64_dtype('0 days')
377
+ False
378
+ """
379
+ if isinstance(arr_or_dtype, np.dtype):
380
+ # GH#33400 fastpath for dtype object
381
+ return arr_or_dtype.kind == "m"
382
+
383
+ return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))
384
+
385
+
386
+ def is_period_dtype(arr_or_dtype) -> bool:
387
+ """
388
+ Check whether an array-like or dtype is of the Period dtype.
389
+
390
+ .. deprecated:: 2.2.0
391
+ Use isinstance(dtype, pd.PeriodDtype) instead.
392
+
393
+ Parameters
394
+ ----------
395
+ arr_or_dtype : array-like or dtype
396
+ The array-like or dtype to check.
397
+
398
+ Returns
399
+ -------
400
+ boolean
401
+ Whether or not the array-like or dtype is of the Period dtype.
402
+
403
+ Examples
404
+ --------
405
+ >>> from pandas.core.dtypes.common import is_period_dtype
406
+ >>> is_period_dtype(object)
407
+ False
408
+ >>> is_period_dtype(pd.PeriodDtype(freq="D"))
409
+ True
410
+ >>> is_period_dtype([1, 2, 3])
411
+ False
412
+ >>> is_period_dtype(pd.Period("2017-01-01"))
413
+ False
414
+ >>> is_period_dtype(pd.PeriodIndex([], freq="Y"))
415
+ True
416
+ """
417
+ warnings.warn(
418
+ "is_period_dtype is deprecated and will be removed in a future version. "
419
+ "Use `isinstance(dtype, pd.PeriodDtype)` instead",
420
+ DeprecationWarning,
421
+ stacklevel=2,
422
+ )
423
+ if isinstance(arr_or_dtype, ExtensionDtype):
424
+ # GH#33400 fastpath for dtype object
425
+ return arr_or_dtype.type is Period
426
+
427
+ if arr_or_dtype is None:
428
+ return False
429
+ return PeriodDtype.is_dtype(arr_or_dtype)
430
+
431
+
432
+ def is_interval_dtype(arr_or_dtype) -> bool:
433
+ """
434
+ Check whether an array-like or dtype is of the Interval dtype.
435
+
436
+ .. deprecated:: 2.2.0
437
+ Use isinstance(dtype, pd.IntervalDtype) instead.
438
+
439
+ Parameters
440
+ ----------
441
+ arr_or_dtype : array-like or dtype
442
+ The array-like or dtype to check.
443
+
444
+ Returns
445
+ -------
446
+ boolean
447
+ Whether or not the array-like or dtype is of the Interval dtype.
448
+
449
+ Examples
450
+ --------
451
+ >>> from pandas.core.dtypes.common import is_interval_dtype
452
+ >>> is_interval_dtype(object)
453
+ False
454
+ >>> is_interval_dtype(pd.IntervalDtype())
455
+ True
456
+ >>> is_interval_dtype([1, 2, 3])
457
+ False
458
+ >>>
459
+ >>> interval = pd.Interval(1, 2, closed="right")
460
+ >>> is_interval_dtype(interval)
461
+ False
462
+ >>> is_interval_dtype(pd.IntervalIndex([interval]))
463
+ True
464
+ """
465
+ # GH#52607
466
+ warnings.warn(
467
+ "is_interval_dtype is deprecated and will be removed in a future version. "
468
+ "Use `isinstance(dtype, pd.IntervalDtype)` instead",
469
+ DeprecationWarning,
470
+ stacklevel=2,
471
+ )
472
+ if isinstance(arr_or_dtype, ExtensionDtype):
473
+ # GH#33400 fastpath for dtype object
474
+ return arr_or_dtype.type is Interval
475
+
476
+ if arr_or_dtype is None:
477
+ return False
478
+ return IntervalDtype.is_dtype(arr_or_dtype)
479
+
480
+
481
+ def is_categorical_dtype(arr_or_dtype) -> bool:
482
+ """
483
+ Check whether an array-like or dtype is of the Categorical dtype.
484
+
485
+ .. deprecated:: 2.2.0
486
+ Use isinstance(dtype, pd.CategoricalDtype) instead.
487
+
488
+ Parameters
489
+ ----------
490
+ arr_or_dtype : array-like or dtype
491
+ The array-like or dtype to check.
492
+
493
+ Returns
494
+ -------
495
+ boolean
496
+ Whether or not the array-like or dtype is of the Categorical dtype.
497
+
498
+ Examples
499
+ --------
500
+ >>> from pandas.api.types import is_categorical_dtype
501
+ >>> from pandas import CategoricalDtype
502
+ >>> is_categorical_dtype(object)
503
+ False
504
+ >>> is_categorical_dtype(CategoricalDtype())
505
+ True
506
+ >>> is_categorical_dtype([1, 2, 3])
507
+ False
508
+ >>> is_categorical_dtype(pd.Categorical([1, 2, 3]))
509
+ True
510
+ >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
511
+ True
512
+ """
513
+ # GH#52527
514
+ warnings.warn(
515
+ "is_categorical_dtype is deprecated and will be removed in a future "
516
+ "version. Use isinstance(dtype, pd.CategoricalDtype) instead",
517
+ DeprecationWarning,
518
+ stacklevel=2,
519
+ )
520
+ if isinstance(arr_or_dtype, ExtensionDtype):
521
+ # GH#33400 fastpath for dtype object
522
+ return arr_or_dtype.name == "category"
523
+
524
+ if arr_or_dtype is None:
525
+ return False
526
+ return CategoricalDtype.is_dtype(arr_or_dtype)
527
+
528
+
529
+ def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:
530
+ """
531
+ Faster alternative to is_string_dtype, assumes we have a np.dtype object.
532
+ """
533
+ return dtype == object or dtype.kind in "SU"
534
+
535
+
536
+ def is_string_dtype(arr_or_dtype) -> bool:
537
+ """
538
+ Check whether the provided array or dtype is of the string dtype.
539
+
540
+ If an array is passed with an object dtype, the elements must be
541
+ inferred as strings.
542
+
543
+ Parameters
544
+ ----------
545
+ arr_or_dtype : array-like or dtype
546
+ The array or dtype to check.
547
+
548
+ Returns
549
+ -------
550
+ boolean
551
+ Whether or not the array or dtype is of the string dtype.
552
+
553
+ Examples
554
+ --------
555
+ >>> from pandas.api.types import is_string_dtype
556
+ >>> is_string_dtype(str)
557
+ True
558
+ >>> is_string_dtype(object)
559
+ True
560
+ >>> is_string_dtype(int)
561
+ False
562
+ >>> is_string_dtype(np.array(['a', 'b']))
563
+ True
564
+ >>> is_string_dtype(pd.Series([1, 2]))
565
+ False
566
+ >>> is_string_dtype(pd.Series([1, 2], dtype=object))
567
+ False
568
+ """
569
+ if hasattr(arr_or_dtype, "dtype") and _get_dtype(arr_or_dtype).kind == "O":
570
+ return is_all_strings(arr_or_dtype)
571
+
572
+ def condition(dtype) -> bool:
573
+ if is_string_or_object_np_dtype(dtype):
574
+ return True
575
+ try:
576
+ return dtype == "string"
577
+ except TypeError:
578
+ return False
579
+
580
+ return _is_dtype(arr_or_dtype, condition)
581
+
582
+
583
+ def is_dtype_equal(source, target) -> bool:
584
+ """
585
+ Check if two dtypes are equal.
586
+
587
+ Parameters
588
+ ----------
589
+ source : The first dtype to compare
590
+ target : The second dtype to compare
591
+
592
+ Returns
593
+ -------
594
+ boolean
595
+ Whether or not the two dtypes are equal.
596
+
597
+ Examples
598
+ --------
599
+ >>> is_dtype_equal(int, float)
600
+ False
601
+ >>> is_dtype_equal("int", int)
602
+ True
603
+ >>> is_dtype_equal(object, "category")
604
+ False
605
+ >>> is_dtype_equal(CategoricalDtype(), "category")
606
+ True
607
+ >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64")
608
+ False
609
+ """
610
+ if isinstance(target, str):
611
+ if not isinstance(source, str):
612
+ # GH#38516 ensure we get the same behavior from
613
+ # is_dtype_equal(CDT, "category") and CDT == "category"
614
+ try:
615
+ src = _get_dtype(source)
616
+ if isinstance(src, ExtensionDtype):
617
+ return src == target
618
+ except (TypeError, AttributeError, ImportError):
619
+ return False
620
+ elif isinstance(source, str):
621
+ return is_dtype_equal(target, source)
622
+
623
+ try:
624
+ source = _get_dtype(source)
625
+ target = _get_dtype(target)
626
+ return source == target
627
+ except (TypeError, AttributeError, ImportError):
628
+ # invalid comparison
629
+ # object == category will hit this
630
+ return False
631
+
632
+
633
+ def is_integer_dtype(arr_or_dtype) -> bool:
634
+ """
635
+ Check whether the provided array or dtype is of an integer dtype.
636
+
637
+ Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
638
+
639
+ The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
640
+ as integer by this function.
641
+
642
+ Parameters
643
+ ----------
644
+ arr_or_dtype : array-like or dtype
645
+ The array or dtype to check.
646
+
647
+ Returns
648
+ -------
649
+ boolean
650
+ Whether or not the array or dtype is of an integer dtype and
651
+ not an instance of timedelta64.
652
+
653
+ Examples
654
+ --------
655
+ >>> from pandas.api.types import is_integer_dtype
656
+ >>> is_integer_dtype(str)
657
+ False
658
+ >>> is_integer_dtype(int)
659
+ True
660
+ >>> is_integer_dtype(float)
661
+ False
662
+ >>> is_integer_dtype(np.uint64)
663
+ True
664
+ >>> is_integer_dtype('int8')
665
+ True
666
+ >>> is_integer_dtype('Int8')
667
+ True
668
+ >>> is_integer_dtype(pd.Int8Dtype)
669
+ True
670
+ >>> is_integer_dtype(np.datetime64)
671
+ False
672
+ >>> is_integer_dtype(np.timedelta64)
673
+ False
674
+ >>> is_integer_dtype(np.array(['a', 'b']))
675
+ False
676
+ >>> is_integer_dtype(pd.Series([1, 2]))
677
+ True
678
+ >>> is_integer_dtype(np.array([], dtype=np.timedelta64))
679
+ False
680
+ >>> is_integer_dtype(pd.Index([1, 2.])) # float
681
+ False
682
+ """
683
+ return _is_dtype_type(
684
+ arr_or_dtype, _classes_and_not_datetimelike(np.integer)
685
+ ) or _is_dtype(
686
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu"
687
+ )
688
+
689
+
690
+ def is_signed_integer_dtype(arr_or_dtype) -> bool:
691
+ """
692
+ Check whether the provided array or dtype is of a signed integer dtype.
693
+
694
+ Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
695
+
696
+ The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
697
+ as integer by this function.
698
+
699
+ Parameters
700
+ ----------
701
+ arr_or_dtype : array-like or dtype
702
+ The array or dtype to check.
703
+
704
+ Returns
705
+ -------
706
+ boolean
707
+ Whether or not the array or dtype is of a signed integer dtype
708
+ and not an instance of timedelta64.
709
+
710
+ Examples
711
+ --------
712
+ >>> from pandas.core.dtypes.common import is_signed_integer_dtype
713
+ >>> is_signed_integer_dtype(str)
714
+ False
715
+ >>> is_signed_integer_dtype(int)
716
+ True
717
+ >>> is_signed_integer_dtype(float)
718
+ False
719
+ >>> is_signed_integer_dtype(np.uint64) # unsigned
720
+ False
721
+ >>> is_signed_integer_dtype('int8')
722
+ True
723
+ >>> is_signed_integer_dtype('Int8')
724
+ True
725
+ >>> is_signed_integer_dtype(pd.Int8Dtype)
726
+ True
727
+ >>> is_signed_integer_dtype(np.datetime64)
728
+ False
729
+ >>> is_signed_integer_dtype(np.timedelta64)
730
+ False
731
+ >>> is_signed_integer_dtype(np.array(['a', 'b']))
732
+ False
733
+ >>> is_signed_integer_dtype(pd.Series([1, 2]))
734
+ True
735
+ >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
736
+ False
737
+ >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float
738
+ False
739
+ >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
740
+ False
741
+ """
742
+ return _is_dtype_type(
743
+ arr_or_dtype, _classes_and_not_datetimelike(np.signedinteger)
744
+ ) or _is_dtype(
745
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "i"
746
+ )
747
+
748
+
749
+ def is_unsigned_integer_dtype(arr_or_dtype) -> bool:
750
+ """
751
+ Check whether the provided array or dtype is of an unsigned integer dtype.
752
+
753
+ The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also
754
+ considered as integer by this function.
755
+
756
+ Parameters
757
+ ----------
758
+ arr_or_dtype : array-like or dtype
759
+ The array or dtype to check.
760
+
761
+ Returns
762
+ -------
763
+ boolean
764
+ Whether or not the array or dtype is of an unsigned integer dtype.
765
+
766
+ Examples
767
+ --------
768
+ >>> from pandas.api.types import is_unsigned_integer_dtype
769
+ >>> is_unsigned_integer_dtype(str)
770
+ False
771
+ >>> is_unsigned_integer_dtype(int) # signed
772
+ False
773
+ >>> is_unsigned_integer_dtype(float)
774
+ False
775
+ >>> is_unsigned_integer_dtype(np.uint64)
776
+ True
777
+ >>> is_unsigned_integer_dtype('uint8')
778
+ True
779
+ >>> is_unsigned_integer_dtype('UInt8')
780
+ True
781
+ >>> is_unsigned_integer_dtype(pd.UInt8Dtype)
782
+ True
783
+ >>> is_unsigned_integer_dtype(np.array(['a', 'b']))
784
+ False
785
+ >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed
786
+ False
787
+ >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float
788
+ False
789
+ >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
790
+ True
791
+ """
792
+ return _is_dtype_type(
793
+ arr_or_dtype, _classes_and_not_datetimelike(np.unsignedinteger)
794
+ ) or _is_dtype(
795
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u"
796
+ )
797
+
798
+
799
+ def is_int64_dtype(arr_or_dtype) -> bool:
800
+ """
801
+ Check whether the provided array or dtype is of the int64 dtype.
802
+
803
+ .. deprecated:: 2.1.0
804
+
805
+ is_int64_dtype is deprecated and will be removed in a future
806
+ version. Use dtype == np.int64 instead.
807
+
808
+ Parameters
809
+ ----------
810
+ arr_or_dtype : array-like or dtype
811
+ The array or dtype to check.
812
+
813
+ Returns
814
+ -------
815
+ boolean
816
+ Whether or not the array or dtype is of the int64 dtype.
817
+
818
+ Notes
819
+ -----
820
+ Depending on system architecture, the return value of `is_int64_dtype(
821
+ int)` will be True if the OS uses 64-bit integers and False if the OS
822
+ uses 32-bit integers.
823
+
824
+ Examples
825
+ --------
826
+ >>> from pandas.api.types import is_int64_dtype
827
+ >>> is_int64_dtype(str) # doctest: +SKIP
828
+ False
829
+ >>> is_int64_dtype(np.int32) # doctest: +SKIP
830
+ False
831
+ >>> is_int64_dtype(np.int64) # doctest: +SKIP
832
+ True
833
+ >>> is_int64_dtype('int8') # doctest: +SKIP
834
+ False
835
+ >>> is_int64_dtype('Int8') # doctest: +SKIP
836
+ False
837
+ >>> is_int64_dtype(pd.Int64Dtype) # doctest: +SKIP
838
+ True
839
+ >>> is_int64_dtype(float) # doctest: +SKIP
840
+ False
841
+ >>> is_int64_dtype(np.uint64) # unsigned # doctest: +SKIP
842
+ False
843
+ >>> is_int64_dtype(np.array(['a', 'b'])) # doctest: +SKIP
844
+ False
845
+ >>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) # doctest: +SKIP
846
+ True
847
+ >>> is_int64_dtype(pd.Index([1, 2.])) # float # doctest: +SKIP
848
+ False
849
+ >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned # doctest: +SKIP
850
+ False
851
+ """
852
+ # GH#52564
853
+ warnings.warn(
854
+ "is_int64_dtype is deprecated and will be removed in a future "
855
+ "version. Use dtype == np.int64 instead.",
856
+ DeprecationWarning,
857
+ stacklevel=2,
858
+ )
859
+ return _is_dtype_type(arr_or_dtype, classes(np.int64))
860
+
861
+
862
+ def is_datetime64_any_dtype(arr_or_dtype) -> bool:
863
+ """
864
+ Check whether the provided array or dtype is of the datetime64 dtype.
865
+
866
+ Parameters
867
+ ----------
868
+ arr_or_dtype : array-like or dtype
869
+ The array or dtype to check.
870
+
871
+ Returns
872
+ -------
873
+ bool
874
+ Whether or not the array or dtype is of the datetime64 dtype.
875
+
876
+ Examples
877
+ --------
878
+ >>> from pandas.api.types import is_datetime64_any_dtype
879
+ >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
880
+ >>> is_datetime64_any_dtype(str)
881
+ False
882
+ >>> is_datetime64_any_dtype(int)
883
+ False
884
+ >>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive
885
+ True
886
+ >>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
887
+ True
888
+ >>> is_datetime64_any_dtype(np.array(['a', 'b']))
889
+ False
890
+ >>> is_datetime64_any_dtype(np.array([1, 2]))
891
+ False
892
+ >>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]"))
893
+ True
894
+ >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
895
+ True
896
+ """
897
+ if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)):
898
+ # GH#33400 fastpath for dtype object
899
+ return arr_or_dtype.kind == "M"
900
+
901
+ if arr_or_dtype is None:
902
+ return False
903
+
904
+ try:
905
+ tipo = _get_dtype(arr_or_dtype)
906
+ except TypeError:
907
+ return False
908
+ return lib.is_np_dtype(tipo, "M") or isinstance(tipo, DatetimeTZDtype)
909
+
910
+
911
+ def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
912
+ """
913
+ Check whether the provided array or dtype is of the datetime64[ns] dtype.
914
+
915
+ Parameters
916
+ ----------
917
+ arr_or_dtype : array-like or dtype
918
+ The array or dtype to check.
919
+
920
+ Returns
921
+ -------
922
+ bool
923
+ Whether or not the array or dtype is of the datetime64[ns] dtype.
924
+
925
+ Examples
926
+ --------
927
+ >>> from pandas.api.types import is_datetime64_ns_dtype
928
+ >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
929
+ >>> is_datetime64_ns_dtype(str)
930
+ False
931
+ >>> is_datetime64_ns_dtype(int)
932
+ False
933
+ >>> is_datetime64_ns_dtype(np.datetime64) # no unit
934
+ False
935
+ >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
936
+ True
937
+ >>> is_datetime64_ns_dtype(np.array(['a', 'b']))
938
+ False
939
+ >>> is_datetime64_ns_dtype(np.array([1, 2]))
940
+ False
941
+ >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64")) # no unit
942
+ False
943
+ >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) # wrong unit
944
+ False
945
+ >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
946
+ True
947
+ """
948
+ if arr_or_dtype is None:
949
+ return False
950
+ try:
951
+ tipo = _get_dtype(arr_or_dtype)
952
+ except TypeError:
953
+ return False
954
+ return tipo == DT64NS_DTYPE or (
955
+ isinstance(tipo, DatetimeTZDtype) and tipo.unit == "ns"
956
+ )
957
+
958
+
959
+ def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
960
+ """
961
+ Check whether the provided array or dtype is of the timedelta64[ns] dtype.
962
+
963
+ This is a very specific dtype, so generic ones like `np.timedelta64`
964
+ will return False if passed into this function.
965
+
966
+ Parameters
967
+ ----------
968
+ arr_or_dtype : array-like or dtype
969
+ The array or dtype to check.
970
+
971
+ Returns
972
+ -------
973
+ boolean
974
+ Whether or not the array or dtype is of the timedelta64[ns] dtype.
975
+
976
+ Examples
977
+ --------
978
+ >>> from pandas.core.dtypes.common import is_timedelta64_ns_dtype
979
+ >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
980
+ True
981
+ >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency
982
+ False
983
+ >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
984
+ True
985
+ >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
986
+ False
987
+ """
988
+ return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE)
989
+
990
+
991
+ # This exists to silence numpy deprecation warnings, see GH#29553
992
+ def is_numeric_v_string_like(a: ArrayLike, b) -> bool:
993
+ """
994
+ Check if we are comparing a string-like object to a numeric ndarray.
995
+ NumPy doesn't like to compare such objects, especially numeric arrays
996
+ and scalar string-likes.
997
+
998
+ Parameters
999
+ ----------
1000
+ a : array-like, scalar
1001
+ The first object to check.
1002
+ b : array-like, scalar
1003
+ The second object to check.
1004
+
1005
+ Returns
1006
+ -------
1007
+ boolean
1008
+ Whether we are comparing a string-like object to a numeric array.
1009
+
1010
+ Examples
1011
+ --------
1012
+ >>> is_numeric_v_string_like(np.array([1]), "foo")
1013
+ True
1014
+ >>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
1015
+ True
1016
+ >>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
1017
+ True
1018
+ >>> is_numeric_v_string_like(np.array([1]), np.array([2]))
1019
+ False
1020
+ >>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
1021
+ False
1022
+ """
1023
+ is_a_array = isinstance(a, np.ndarray)
1024
+ is_b_array = isinstance(b, np.ndarray)
1025
+
1026
+ is_a_numeric_array = is_a_array and a.dtype.kind in ("u", "i", "f", "c", "b")
1027
+ is_b_numeric_array = is_b_array and b.dtype.kind in ("u", "i", "f", "c", "b")
1028
+ is_a_string_array = is_a_array and a.dtype.kind in ("S", "U")
1029
+ is_b_string_array = is_b_array and b.dtype.kind in ("S", "U")
1030
+
1031
+ is_b_scalar_string_like = not is_b_array and isinstance(b, str)
1032
+
1033
+ return (
1034
+ (is_a_numeric_array and is_b_scalar_string_like)
1035
+ or (is_a_numeric_array and is_b_string_array)
1036
+ or (is_b_numeric_array and is_a_string_array)
1037
+ )
1038
+
1039
+
1040
+ def needs_i8_conversion(dtype: DtypeObj | None) -> bool:
1041
+ """
1042
+ Check whether the dtype should be converted to int64.
1043
+
1044
+ Dtype "needs" such a conversion if the dtype is of a datetime-like dtype
1045
+
1046
+ Parameters
1047
+ ----------
1048
+ dtype : np.dtype, ExtensionDtype, or None
1049
+
1050
+ Returns
1051
+ -------
1052
+ boolean
1053
+ Whether or not the dtype should be converted to int64.
1054
+
1055
+ Examples
1056
+ --------
1057
+ >>> needs_i8_conversion(str)
1058
+ False
1059
+ >>> needs_i8_conversion(np.int64)
1060
+ False
1061
+ >>> needs_i8_conversion(np.datetime64)
1062
+ False
1063
+ >>> needs_i8_conversion(np.dtype(np.datetime64))
1064
+ True
1065
+ >>> needs_i8_conversion(np.array(['a', 'b']))
1066
+ False
1067
+ >>> needs_i8_conversion(pd.Series([1, 2]))
1068
+ False
1069
+ >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
1070
+ False
1071
+ >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
1072
+ False
1073
+ >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype)
1074
+ True
1075
+ """
1076
+ if isinstance(dtype, np.dtype):
1077
+ return dtype.kind in "mM"
1078
+ return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))
1079
+
1080
+
1081
+ def is_numeric_dtype(arr_or_dtype) -> bool:
1082
+ """
1083
+ Check whether the provided array or dtype is of a numeric dtype.
1084
+
1085
+ Parameters
1086
+ ----------
1087
+ arr_or_dtype : array-like or dtype
1088
+ The array or dtype to check.
1089
+
1090
+ Returns
1091
+ -------
1092
+ boolean
1093
+ Whether or not the array or dtype is of a numeric dtype.
1094
+
1095
+ Examples
1096
+ --------
1097
+ >>> from pandas.api.types import is_numeric_dtype
1098
+ >>> is_numeric_dtype(str)
1099
+ False
1100
+ >>> is_numeric_dtype(int)
1101
+ True
1102
+ >>> is_numeric_dtype(float)
1103
+ True
1104
+ >>> is_numeric_dtype(np.uint64)
1105
+ True
1106
+ >>> is_numeric_dtype(np.datetime64)
1107
+ False
1108
+ >>> is_numeric_dtype(np.timedelta64)
1109
+ False
1110
+ >>> is_numeric_dtype(np.array(['a', 'b']))
1111
+ False
1112
+ >>> is_numeric_dtype(pd.Series([1, 2]))
1113
+ True
1114
+ >>> is_numeric_dtype(pd.Index([1, 2.]))
1115
+ True
1116
+ >>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
1117
+ False
1118
+ """
1119
+ return _is_dtype_type(
1120
+ arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_)
1121
+ ) or _is_dtype(
1122
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric
1123
+ )
1124
+
1125
+
1126
+ def is_any_real_numeric_dtype(arr_or_dtype) -> bool:
1127
+ """
1128
+ Check whether the provided array or dtype is of a real number dtype.
1129
+
1130
+ Parameters
1131
+ ----------
1132
+ arr_or_dtype : array-like or dtype
1133
+ The array or dtype to check.
1134
+
1135
+ Returns
1136
+ -------
1137
+ boolean
1138
+ Whether or not the array or dtype is of a real number dtype.
1139
+
1140
+ Examples
1141
+ --------
1142
+ >>> from pandas.api.types import is_any_real_numeric_dtype
1143
+ >>> is_any_real_numeric_dtype(int)
1144
+ True
1145
+ >>> is_any_real_numeric_dtype(float)
1146
+ True
1147
+ >>> is_any_real_numeric_dtype(object)
1148
+ False
1149
+ >>> is_any_real_numeric_dtype(str)
1150
+ False
1151
+ >>> is_any_real_numeric_dtype(complex(1, 2))
1152
+ False
1153
+ >>> is_any_real_numeric_dtype(bool)
1154
+ False
1155
+ """
1156
+ return (
1157
+ is_numeric_dtype(arr_or_dtype)
1158
+ and not is_complex_dtype(arr_or_dtype)
1159
+ and not is_bool_dtype(arr_or_dtype)
1160
+ )
1161
+
1162
+
1163
+ def is_float_dtype(arr_or_dtype) -> bool:
1164
+ """
1165
+ Check whether the provided array or dtype is of a float dtype.
1166
+
1167
+ Parameters
1168
+ ----------
1169
+ arr_or_dtype : array-like or dtype
1170
+ The array or dtype to check.
1171
+
1172
+ Returns
1173
+ -------
1174
+ boolean
1175
+ Whether or not the array or dtype is of a float dtype.
1176
+
1177
+ Examples
1178
+ --------
1179
+ >>> from pandas.api.types import is_float_dtype
1180
+ >>> is_float_dtype(str)
1181
+ False
1182
+ >>> is_float_dtype(int)
1183
+ False
1184
+ >>> is_float_dtype(float)
1185
+ True
1186
+ >>> is_float_dtype(np.array(['a', 'b']))
1187
+ False
1188
+ >>> is_float_dtype(pd.Series([1, 2]))
1189
+ False
1190
+ >>> is_float_dtype(pd.Index([1, 2.]))
1191
+ True
1192
+ """
1193
+ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(
1194
+ arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f"
1195
+ )
1196
+
1197
+
1198
+ def is_bool_dtype(arr_or_dtype) -> bool:
1199
+ """
1200
+ Check whether the provided array or dtype is of a boolean dtype.
1201
+
1202
+ Parameters
1203
+ ----------
1204
+ arr_or_dtype : array-like or dtype
1205
+ The array or dtype to check.
1206
+
1207
+ Returns
1208
+ -------
1209
+ boolean
1210
+ Whether or not the array or dtype is of a boolean dtype.
1211
+
1212
+ Notes
1213
+ -----
1214
+ An ExtensionArray is considered boolean when the ``_is_boolean``
1215
+ attribute is set to True.
1216
+
1217
+ Examples
1218
+ --------
1219
+ >>> from pandas.api.types import is_bool_dtype
1220
+ >>> is_bool_dtype(str)
1221
+ False
1222
+ >>> is_bool_dtype(int)
1223
+ False
1224
+ >>> is_bool_dtype(bool)
1225
+ True
1226
+ >>> is_bool_dtype(np.bool_)
1227
+ True
1228
+ >>> is_bool_dtype(np.array(['a', 'b']))
1229
+ False
1230
+ >>> is_bool_dtype(pd.Series([1, 2]))
1231
+ False
1232
+ >>> is_bool_dtype(np.array([True, False]))
1233
+ True
1234
+ >>> is_bool_dtype(pd.Categorical([True, False]))
1235
+ True
1236
+ >>> is_bool_dtype(pd.arrays.SparseArray([True, False]))
1237
+ True
1238
+ """
1239
+ if arr_or_dtype is None:
1240
+ return False
1241
+ try:
1242
+ dtype = _get_dtype(arr_or_dtype)
1243
+ except (TypeError, ValueError):
1244
+ return False
1245
+
1246
+ if isinstance(dtype, CategoricalDtype):
1247
+ arr_or_dtype = dtype.categories
1248
+ # now we use the special definition for Index
1249
+
1250
+ if isinstance(arr_or_dtype, ABCIndex):
1251
+ # Allow Index[object] that is all-bools or Index["boolean"]
1252
+ if arr_or_dtype.inferred_type == "boolean":
1253
+ if not is_bool_dtype(arr_or_dtype.dtype):
1254
+ # GH#52680
1255
+ warnings.warn(
1256
+ "The behavior of is_bool_dtype with an object-dtype Index "
1257
+ "of bool objects is deprecated. In a future version, "
1258
+ "this will return False. Cast the Index to a bool dtype instead.",
1259
+ DeprecationWarning,
1260
+ stacklevel=2,
1261
+ )
1262
+ return True
1263
+ return False
1264
+ elif isinstance(dtype, ExtensionDtype):
1265
+ return getattr(dtype, "_is_boolean", False)
1266
+
1267
+ return issubclass(dtype.type, np.bool_)
1268
+
1269
+
1270
+ def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool:
1271
+ """
1272
+ Analogue to is_extension_array_dtype but excluding DatetimeTZDtype.
1273
+ """
1274
+ return isinstance(dtype, ExtensionDtype) and not dtype._supports_2d
1275
+
1276
+
1277
+ def is_extension_array_dtype(arr_or_dtype) -> bool:
1278
+ """
1279
+ Check if an object is a pandas extension array type.
1280
+
1281
+ See the :ref:`User Guide <extending.extension-types>` for more.
1282
+
1283
+ Parameters
1284
+ ----------
1285
+ arr_or_dtype : object
1286
+ For array-like input, the ``.dtype`` attribute will
1287
+ be extracted.
1288
+
1289
+ Returns
1290
+ -------
1291
+ bool
1292
+ Whether the `arr_or_dtype` is an extension array type.
1293
+
1294
+ Notes
1295
+ -----
1296
+ This checks whether an object implements the pandas extension
1297
+ array interface. In pandas, this includes:
1298
+
1299
+ * Categorical
1300
+ * Sparse
1301
+ * Interval
1302
+ * Period
1303
+ * DatetimeArray
1304
+ * TimedeltaArray
1305
+
1306
+ Third-party libraries may implement arrays or types satisfying
1307
+ this interface as well.
1308
+
1309
+ Examples
1310
+ --------
1311
+ >>> from pandas.api.types import is_extension_array_dtype
1312
+ >>> arr = pd.Categorical(['a', 'b'])
1313
+ >>> is_extension_array_dtype(arr)
1314
+ True
1315
+ >>> is_extension_array_dtype(arr.dtype)
1316
+ True
1317
+
1318
+ >>> arr = np.array(['a', 'b'])
1319
+ >>> is_extension_array_dtype(arr.dtype)
1320
+ False
1321
+ """
1322
+ dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
1323
+ if isinstance(dtype, ExtensionDtype):
1324
+ return True
1325
+ elif isinstance(dtype, np.dtype):
1326
+ return False
1327
+ else:
1328
+ return registry.find(dtype) is not None
1329
+
1330
+
1331
+ def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool:
1332
+ """
1333
+ Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype.
1334
+
1335
+ Notes
1336
+ -----
1337
+ Checks only for dtype objects, not dtype-castable strings or types.
1338
+ """
1339
+ return isinstance(dtype, ExtensionDtype) or (lib.is_np_dtype(dtype, "mM"))
1340
+
1341
+
1342
+ def is_complex_dtype(arr_or_dtype) -> bool:
1343
+ """
1344
+ Check whether the provided array or dtype is of a complex dtype.
1345
+
1346
+ Parameters
1347
+ ----------
1348
+ arr_or_dtype : array-like or dtype
1349
+ The array or dtype to check.
1350
+
1351
+ Returns
1352
+ -------
1353
+ boolean
1354
+ Whether or not the array or dtype is of a complex dtype.
1355
+
1356
+ Examples
1357
+ --------
1358
+ >>> from pandas.api.types import is_complex_dtype
1359
+ >>> is_complex_dtype(str)
1360
+ False
1361
+ >>> is_complex_dtype(int)
1362
+ False
1363
+ >>> is_complex_dtype(np.complex128)
1364
+ True
1365
+ >>> is_complex_dtype(np.array(['a', 'b']))
1366
+ False
1367
+ >>> is_complex_dtype(pd.Series([1, 2]))
1368
+ False
1369
+ >>> is_complex_dtype(np.array([1 + 1j, 5]))
1370
+ True
1371
+ """
1372
+ return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))
1373
+
1374
+
1375
+ def _is_dtype(arr_or_dtype, condition) -> bool:
1376
+ """
1377
+ Return true if the condition is satisfied for the arr_or_dtype.
1378
+
1379
+ Parameters
1380
+ ----------
1381
+ arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType
1382
+ The array-like or dtype object whose dtype we want to extract.
1383
+ condition : callable[Union[np.dtype, ExtensionDtype]]
1384
+
1385
+ Returns
1386
+ -------
1387
+ bool
1388
+
1389
+ """
1390
+ if arr_or_dtype is None:
1391
+ return False
1392
+ try:
1393
+ dtype = _get_dtype(arr_or_dtype)
1394
+ except (TypeError, ValueError):
1395
+ return False
1396
+ return condition(dtype)
1397
+
1398
+
1399
+ def _get_dtype(arr_or_dtype) -> DtypeObj:
1400
+ """
1401
+ Get the dtype instance associated with an array
1402
+ or dtype object.
1403
+
1404
+ Parameters
1405
+ ----------
1406
+ arr_or_dtype : array-like or dtype
1407
+ The array-like or dtype object whose dtype we want to extract.
1408
+
1409
+ Returns
1410
+ -------
1411
+ obj_dtype : The extracted dtype instance from the
1412
+ passed in array or dtype object.
1413
+
1414
+ Raises
1415
+ ------
1416
+ TypeError : The passed in object is None.
1417
+ """
1418
+ if arr_or_dtype is None:
1419
+ raise TypeError("Cannot deduce dtype from null object")
1420
+
1421
+ # fastpath
1422
+ if isinstance(arr_or_dtype, np.dtype):
1423
+ return arr_or_dtype
1424
+ elif isinstance(arr_or_dtype, type):
1425
+ return np.dtype(arr_or_dtype)
1426
+
1427
+ # if we have an array-like
1428
+ elif hasattr(arr_or_dtype, "dtype"):
1429
+ arr_or_dtype = arr_or_dtype.dtype
1430
+
1431
+ return pandas_dtype(arr_or_dtype)
1432
+
1433
+
1434
+ def _is_dtype_type(arr_or_dtype, condition) -> bool:
1435
+ """
1436
+ Return true if the condition is satisfied for the arr_or_dtype.
1437
+
1438
+ Parameters
1439
+ ----------
1440
+ arr_or_dtype : array-like or dtype
1441
+ The array-like or dtype object whose dtype we want to extract.
1442
+ condition : callable[Union[np.dtype, ExtensionDtypeType]]
1443
+
1444
+ Returns
1445
+ -------
1446
+ bool : if the condition is satisfied for the arr_or_dtype
1447
+ """
1448
+ if arr_or_dtype is None:
1449
+ return condition(type(None))
1450
+
1451
+ # fastpath
1452
+ if isinstance(arr_or_dtype, np.dtype):
1453
+ return condition(arr_or_dtype.type)
1454
+ elif isinstance(arr_or_dtype, type):
1455
+ if issubclass(arr_or_dtype, ExtensionDtype):
1456
+ arr_or_dtype = arr_or_dtype.type
1457
+ return condition(np.dtype(arr_or_dtype).type)
1458
+
1459
+ # if we have an array-like
1460
+ if hasattr(arr_or_dtype, "dtype"):
1461
+ arr_or_dtype = arr_or_dtype.dtype
1462
+
1463
+ # we are not possibly a dtype
1464
+ elif is_list_like(arr_or_dtype):
1465
+ return condition(type(None))
1466
+
1467
+ try:
1468
+ tipo = pandas_dtype(arr_or_dtype).type
1469
+ except (TypeError, ValueError):
1470
+ if is_scalar(arr_or_dtype):
1471
+ return condition(type(None))
1472
+
1473
+ return False
1474
+
1475
+ return condition(tipo)
1476
+
1477
+
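The public ``is_*_dtype`` predicates earlier in this module are thin wrappers around this dispatch; a hand-rolled check can be composed the same way (a sketch using the private helpers, not a public API):

import numpy as np
import pandas as pd
from pandas.core.dtypes.common import _is_dtype_type, classes

def is_floating(arr_or_dtype) -> bool:
    # Mirrors the pattern used by is_float_dtype and friends above.
    return _is_dtype_type(arr_or_dtype, classes(np.floating))

is_floating(np.float32)             # True: type objects go through np.dtype(...)
is_floating(np.array([1.0, 2.0]))   # True: array-likes contribute their .dtype
is_floating(pd.Series([1, 2]))      # False: int64
is_floating("this is not a dtype")  # False: unparseable scalars fall back to type(None)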
1478
+ def infer_dtype_from_object(dtype) -> type:
1479
+ """
1480
+ Get a numpy dtype.type-style object for a dtype object.
1481
+
1482
+ This method also includes handling of the datetime64[ns] and
1483
+ datetime64[ns, TZ] objects.
1484
+
1485
+ If no dtype can be found, we return ``object``.
1486
+
1487
+ Parameters
1488
+ ----------
1489
+ dtype : dtype, type
1490
+ The dtype object whose numpy dtype.type-style
1491
+ object we want to extract.
1492
+
1493
+ Returns
1494
+ -------
1495
+ type
1496
+ """
1497
+ if isinstance(dtype, type) and issubclass(dtype, np.generic):
1498
+ # Type object from a dtype
1499
+
1500
+ return dtype
1501
+ elif isinstance(dtype, (np.dtype, ExtensionDtype)):
1502
+ # dtype object
1503
+ try:
1504
+ _validate_date_like_dtype(dtype)
1505
+ except TypeError:
1506
+ # Should still pass if we don't have a date-like
1507
+ pass
1508
+ if hasattr(dtype, "numpy_dtype"):
1509
+ # TODO: Implement this properly
1510
+ # https://github.com/pandas-dev/pandas/issues/52576
1511
+ return dtype.numpy_dtype.type
1512
+ return dtype.type
1513
+
1514
+ try:
1515
+ dtype = pandas_dtype(dtype)
1516
+ except TypeError:
1517
+ pass
1518
+
1519
+ if isinstance(dtype, ExtensionDtype):
1520
+ return dtype.type
1521
+ elif isinstance(dtype, str):
1522
+ # TODO(jreback)
1523
+ # should deprecate these
1524
+ if dtype in ["datetimetz", "datetime64tz"]:
1525
+ return DatetimeTZDtype.type
1526
+ elif dtype in ["period"]:
1527
+ raise NotImplementedError
1528
+
1529
+ if dtype in ["datetime", "timedelta"]:
1530
+ dtype += "64"
1531
+ try:
1532
+ return infer_dtype_from_object(getattr(np, dtype))
1533
+ except (AttributeError, TypeError):
1534
+ # Handles cases like _get_dtype(int) i.e.,
1535
+ # Python objects that are valid dtypes
1536
+ # (unlike user-defined types, in general)
1537
+ #
1538
+ # TypeError handles the float16 type code of 'e'
1539
+ # further handle internal types
1540
+ pass
1541
+
1542
+ return infer_dtype_from_object(np.dtype(dtype))
1543
+
1544
+
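A few illustrative inputs and what the recursion above resolves them to (the import is from this private module, shown only for illustration):

import numpy as np
from pandas.core.dtypes.common import infer_dtype_from_object

infer_dtype_from_object(np.int32)    # <class 'numpy.int32'>: already a np.generic subclass
infer_dtype_from_object("float64")   # <class 'numpy.float64'>: string round-trips via np.dtype
infer_dtype_from_object("datetime")  # <class 'numpy.datetime64'>: "datetime" is expanded to "datetime64"
infer_dtype_from_object(int)         # <class 'numpy.int64'> on typical 64-bit builds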
1545
+ def _validate_date_like_dtype(dtype) -> None:
1546
+ """
1547
+ Check whether the dtype is a date-like dtype. Raises an error if invalid.
1548
+
1549
+ Parameters
1550
+ ----------
1551
+ dtype : dtype, type
1552
+ The dtype to check.
1553
+
1554
+ Raises
1555
+ ------
1556
+ TypeError : The dtype could not be cast to a date-like dtype.
1557
+ ValueError : The dtype is an illegal date-like dtype (e.g. the
1558
+ frequency provided is too specific)
1559
+ """
1560
+ try:
1561
+ typ = np.datetime_data(dtype)[0]
1562
+ except ValueError as e:
1563
+ raise TypeError(e) from e
1564
+ if typ not in ["generic", "ns"]:
1565
+ raise ValueError(
1566
+ f"{repr(dtype.name)} is too specific of a frequency, "
1567
+ f"try passing {repr(dtype.type.__name__)}"
1568
+ )
1569
+
1570
+
1571
+ def validate_all_hashable(*args, error_name: str | None = None) -> None:
1572
+ """
1573
+ Return None if all args are hashable, else raise a TypeError.
1574
+
1575
+ Parameters
1576
+ ----------
1577
+ *args
1578
+ Arguments to validate.
1579
+ error_name : str, optional
1580
+ The name to use if error
1581
+
1582
+ Raises
1583
+ ------
1584
+ TypeError : If an argument is not hashable
1585
+
1586
+ Returns
1587
+ -------
1588
+ None
1589
+ """
1590
+ if not all(is_hashable(arg) for arg in args):
1591
+ if error_name:
1592
+ raise TypeError(f"{error_name} must be a hashable type")
1593
+ raise TypeError("All elements must be hashable")
1594
+
1595
+
1596
+ def pandas_dtype(dtype) -> DtypeObj:
1597
+ """
1598
+ Convert input into a pandas only dtype object or a numpy dtype object.
1599
+
1600
+ Parameters
1601
+ ----------
1602
+ dtype : object to be converted
1603
+
1604
+ Returns
1605
+ -------
1606
+ np.dtype or a pandas dtype
1607
+
1608
+ Raises
1609
+ ------
1610
+ TypeError if not a dtype
1611
+
1612
+ Examples
1613
+ --------
1614
+ >>> pd.api.types.pandas_dtype(int)
1615
+ dtype('int64')
1616
+ """
1617
+ # short-circuit
1618
+ if isinstance(dtype, np.ndarray):
1619
+ return dtype.dtype
1620
+ elif isinstance(dtype, (np.dtype, ExtensionDtype)):
1621
+ return dtype
1622
+
1623
+ # registered extension types
1624
+ result = registry.find(dtype)
1625
+ if result is not None:
1626
+ if isinstance(result, type):
1627
+ # GH 31356, GH 54592
1628
+ warnings.warn(
1629
+ f"Instantiating {result.__name__} without any arguments."
1630
+ f" Pass a {result.__name__} instance to silence this warning.",
1631
+ UserWarning,
1632
+ stacklevel=find_stack_level(),
1633
+ )
1634
+ result = result()
1635
+ return result
1636
+
1637
+ # try a numpy dtype
1638
+ # raise a consistent TypeError if failed
1639
+ try:
1640
+ with warnings.catch_warnings():
1641
+ # GH#51523 - Series.astype(np.integer) doesn't show
1642
+ # numpy deprecation warning of np.integer
1643
+ # Hence enabling DeprecationWarning
1644
+ warnings.simplefilter("always", DeprecationWarning)
1645
+ npdtype = np.dtype(dtype)
1646
+ except SyntaxError as err:
1647
+ # np.dtype uses `eval` which can raise SyntaxError
1648
+ raise TypeError(f"data type '{dtype}' not understood") from err
1649
+
1650
+ # Any invalid dtype (such as pd.Timestamp) should raise an error.
1651
+ # np.dtype(invalid_type).kind = 0 for such objects. However, this will
1652
+ # also catch some valid dtypes such as object, np.object_ and 'object'
1653
+ # which we safeguard against by catching them earlier and returning
1654
+ # np.dtype(valid_dtype) before this condition is evaluated.
1655
+ if is_hashable(dtype) and dtype in [
1656
+ object,
1657
+ np.object_,
1658
+ "object",
1659
+ "O",
1660
+ "object_",
1661
+ ]:
1662
+ # check hashability to avoid errors/DeprecationWarning when we get
1663
+ # here and `dtype` is an array
1664
+ return npdtype
1665
+ elif npdtype.kind == "O":
1666
+ raise TypeError(f"dtype '{dtype}' not understood")
1667
+
1668
+ return npdtype
1669
+
1670
+
1671
+ def is_all_strings(value: ArrayLike) -> bool:
1672
+ """
1673
+ Check if this is an array of strings that we should try parsing.
1674
+
1675
+ Includes object-dtype ndarray containing all-strings, StringArray,
1676
+ and Categorical with all-string categories.
1677
+ Does not include numpy string dtypes.
1678
+ """
1679
+ dtype = value.dtype
1680
+
1681
+ if isinstance(dtype, np.dtype):
1682
+ if len(value) == 0:
1683
+ return dtype == np.dtype("object")
1684
+ else:
1685
+ return dtype == np.dtype("object") and lib.is_string_array(
1686
+ np.asarray(value), skipna=False
1687
+ )
1688
+ elif isinstance(dtype, CategoricalDtype):
1689
+ return dtype.categories.inferred_type == "string"
1690
+ return dtype == "string"
1691
+
1692
+
1693
+ __all__ = [
1694
+ "classes",
1695
+ "DT64NS_DTYPE",
1696
+ "ensure_float64",
1697
+ "ensure_python_int",
1698
+ "ensure_str",
1699
+ "infer_dtype_from_object",
1700
+ "INT64_DTYPE",
1701
+ "is_1d_only_ea_dtype",
1702
+ "is_all_strings",
1703
+ "is_any_real_numeric_dtype",
1704
+ "is_array_like",
1705
+ "is_bool",
1706
+ "is_bool_dtype",
1707
+ "is_categorical_dtype",
1708
+ "is_complex",
1709
+ "is_complex_dtype",
1710
+ "is_dataclass",
1711
+ "is_datetime64_any_dtype",
1712
+ "is_datetime64_dtype",
1713
+ "is_datetime64_ns_dtype",
1714
+ "is_datetime64tz_dtype",
1715
+ "is_decimal",
1716
+ "is_dict_like",
1717
+ "is_dtype_equal",
1718
+ "is_ea_or_datetimelike_dtype",
1719
+ "is_extension_array_dtype",
1720
+ "is_file_like",
1721
+ "is_float_dtype",
1722
+ "is_int64_dtype",
1723
+ "is_integer_dtype",
1724
+ "is_interval",
1725
+ "is_interval_dtype",
1726
+ "is_iterator",
1727
+ "is_named_tuple",
1728
+ "is_nested_list_like",
1729
+ "is_number",
1730
+ "is_numeric_dtype",
1731
+ "is_object_dtype",
1732
+ "is_period_dtype",
1733
+ "is_re",
1734
+ "is_re_compilable",
1735
+ "is_scipy_sparse",
1736
+ "is_sequence",
1737
+ "is_signed_integer_dtype",
1738
+ "is_sparse",
1739
+ "is_string_dtype",
1740
+ "is_string_or_object_np_dtype",
1741
+ "is_timedelta64_dtype",
1742
+ "is_timedelta64_ns_dtype",
1743
+ "is_unsigned_integer_dtype",
1744
+ "needs_i8_conversion",
1745
+ "pandas_dtype",
1746
+ "TD64NS_DTYPE",
1747
+ "validate_all_hashable",
1748
+ ]
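The two helpers above, `pandas_dtype` and `validate_all_hashable`, are small but widely used entry points. A minimal usage sketch, assuming a standard pandas 2.x install; the `pandas.core.dtypes.common` import path is the internal module added in this diff and is shown purely for illustration:

import numpy as np
import pandas as pd
from pandas.core.dtypes.common import pandas_dtype, validate_all_hashable

# pandas_dtype normalizes Python types, numpy dtypes and string aliases
# into either a numpy dtype or a registered pandas extension dtype.
assert pandas_dtype(int) == np.dtype(int)
assert pandas_dtype("datetime64[ns]") == np.dtype("datetime64[ns]")
assert pandas_dtype("category").name == "category"  # registered ExtensionDtype

# Inputs that cannot be interpreted as a dtype raise a TypeError.
try:
    pandas_dtype(pd.Timestamp)
except TypeError as err:
    print(err)

# validate_all_hashable returns None when every argument is hashable,
# otherwise it raises TypeError, optionally naming the caller via error_name.
validate_all_hashable("a", 1, (2, 3))
try:
    validate_all_hashable(["lists", "are", "unhashable"], error_name="Series.name")
except TypeError as err:
    print(err)  # Series.name must be a hashable type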
venv/lib/python3.10/site-packages/pandas/core/dtypes/concat.py ADDED
@@ -0,0 +1,348 @@
1
+ """
2
+ Utility functions related to concat.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ cast,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas.util._exceptions import find_stack_level
16
+
17
+ from pandas.core.dtypes.astype import astype_array
18
+ from pandas.core.dtypes.cast import (
19
+ common_dtype_categorical_compat,
20
+ find_common_type,
21
+ np_find_common_type,
22
+ )
23
+ from pandas.core.dtypes.dtypes import CategoricalDtype
24
+ from pandas.core.dtypes.generic import (
25
+ ABCCategoricalIndex,
26
+ ABCSeries,
27
+ )
28
+
29
+ if TYPE_CHECKING:
30
+ from collections.abc import Sequence
31
+
32
+ from pandas._typing import (
33
+ ArrayLike,
34
+ AxisInt,
35
+ DtypeObj,
36
+ )
37
+
38
+ from pandas.core.arrays import (
39
+ Categorical,
40
+ ExtensionArray,
41
+ )
42
+
43
+
44
+ def _is_nonempty(x, axis) -> bool:
45
+ # filter empty arrays
46
+ # 1-d dtypes always are included here
47
+ if x.ndim <= axis:
48
+ return True
49
+ return x.shape[axis] > 0
50
+
51
+
52
+ def concat_compat(
53
+ to_concat: Sequence[ArrayLike], axis: AxisInt = 0, ea_compat_axis: bool = False
54
+ ) -> ArrayLike:
55
+ """
56
+ Provide concatenation of an array of arrays, each of which has a single
57
+ 'normalized' dtype (for example, an object-dtype array here is assumed to be
58
+ non-datetimelike), and provide a combined dtype for the resulting array that
59
+ preserves the overall dtype where possible.
60
+
61
+ Parameters
62
+ ----------
63
+ to_concat : sequence of arrays
64
+ axis : axis to provide concatenation
65
+ ea_compat_axis : bool, default False
66
+ For ExtensionArray compat, behave as if axis == 1 when determining
67
+ whether to drop empty arrays.
68
+
69
+ Returns
70
+ -------
71
+ a single array, preserving the combined dtypes
72
+ """
73
+ if len(to_concat) and lib.dtypes_all_equal([obj.dtype for obj in to_concat]):
74
+ # fastpath!
75
+ obj = to_concat[0]
76
+ if isinstance(obj, np.ndarray):
77
+ to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)
78
+ return np.concatenate(to_concat_arrs, axis=axis)
79
+
80
+ to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)
81
+ if ea_compat_axis:
82
+ # We have 1D objects, that don't support axis keyword
83
+ return obj._concat_same_type(to_concat_eas)
84
+ elif axis == 0:
85
+ return obj._concat_same_type(to_concat_eas)
86
+ else:
87
+ # e.g. DatetimeArray
88
+ # NB: We are assuming here that ensure_wrapped_if_arraylike has
89
+ # been called where relevant.
90
+ return obj._concat_same_type(
91
+ # error: Unexpected keyword argument "axis" for "_concat_same_type"
92
+ # of "ExtensionArray"
93
+ to_concat_eas,
94
+ axis=axis, # type: ignore[call-arg]
95
+ )
96
+
97
+ # If all arrays are empty, there's nothing to convert, just short-cut to
98
+ # the concatenation, #3121.
99
+ #
100
+ # Creating an empty array directly is tempting, but the winnings would be
101
+ # marginal given that it would still require shape & dtype calculation and
102
+ # np.concatenate which has them both implemented is compiled.
103
+ orig = to_concat
104
+ non_empties = [x for x in to_concat if _is_nonempty(x, axis)]
105
+ if non_empties and axis == 0 and not ea_compat_axis:
106
+ # ea_compat_axis see GH#39574
107
+ to_concat = non_empties
108
+
109
+ any_ea, kinds, target_dtype = _get_result_dtype(to_concat, non_empties)
110
+
111
+ if len(to_concat) < len(orig):
112
+ _, _, alt_dtype = _get_result_dtype(orig, non_empties)
113
+ if alt_dtype != target_dtype:
114
+ # GH#39122
115
+ warnings.warn(
116
+ "The behavior of array concatenation with empty entries is "
117
+ "deprecated. In a future version, this will no longer exclude "
118
+ "empty items when determining the result dtype. "
119
+ "To retain the old behavior, exclude the empty entries before "
120
+ "the concat operation.",
121
+ FutureWarning,
122
+ stacklevel=find_stack_level(),
123
+ )
124
+
125
+ if target_dtype is not None:
126
+ to_concat = [astype_array(arr, target_dtype, copy=False) for arr in to_concat]
127
+
128
+ if not isinstance(to_concat[0], np.ndarray):
129
+ # i.e. isinstance(to_concat[0], ExtensionArray)
130
+ to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)
131
+ cls = type(to_concat[0])
132
+ # GH#53640: eg. for datetime array, axis=1 but 0 is default
133
+ # However, class method `_concat_same_type()` for some classes
134
+ # may not support the `axis` keyword
135
+ if ea_compat_axis or axis == 0:
136
+ return cls._concat_same_type(to_concat_eas)
137
+ else:
138
+ return cls._concat_same_type(
139
+ to_concat_eas,
140
+ axis=axis, # type: ignore[call-arg]
141
+ )
142
+ else:
143
+ to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)
144
+ result = np.concatenate(to_concat_arrs, axis=axis)
145
+
146
+ if not any_ea and "b" in kinds and result.dtype.kind in "iuf":
147
+ # GH#39817 cast to object instead of casting bools to numeric
148
+ result = result.astype(object, copy=False)
149
+ return result
150
+
151
+
152
+ def _get_result_dtype(
153
+ to_concat: Sequence[ArrayLike], non_empties: Sequence[ArrayLike]
154
+ ) -> tuple[bool, set[str], DtypeObj | None]:
155
+ target_dtype = None
156
+
157
+ dtypes = {obj.dtype for obj in to_concat}
158
+ kinds = {obj.dtype.kind for obj in to_concat}
159
+
160
+ any_ea = any(not isinstance(x, np.ndarray) for x in to_concat)
161
+ if any_ea:
162
+ # i.e. any ExtensionArrays
163
+
164
+ # we ignore axis here, as internally concatting with EAs is always
165
+ # for axis=0
166
+ if len(dtypes) != 1:
167
+ target_dtype = find_common_type([x.dtype for x in to_concat])
168
+ target_dtype = common_dtype_categorical_compat(to_concat, target_dtype)
169
+
170
+ elif not len(non_empties):
171
+ # we have all empties, but may need to coerce the result dtype to
172
+ # object if we have non-numeric type operands (numpy would otherwise
173
+ # cast this to float)
174
+ if len(kinds) != 1:
175
+ if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}):
176
+ # let numpy coerce
177
+ pass
178
+ else:
179
+ # coerce to object
180
+ target_dtype = np.dtype(object)
181
+ kinds = {"o"}
182
+ else:
183
+ # error: Argument 1 to "np_find_common_type" has incompatible type
184
+ # "*Set[Union[ExtensionDtype, Any]]"; expected "dtype[Any]"
185
+ target_dtype = np_find_common_type(*dtypes) # type: ignore[arg-type]
186
+
187
+ return any_ea, kinds, target_dtype
188
+
189
+
190
+ def union_categoricals(
191
+ to_union, sort_categories: bool = False, ignore_order: bool = False
192
+ ) -> Categorical:
193
+ """
194
+ Combine list-like of Categorical-like, unioning categories.
195
+
196
+ All categories must have the same dtype.
197
+
198
+ Parameters
199
+ ----------
200
+ to_union : list-like
201
+ Categorical, CategoricalIndex, or Series with dtype='category'.
202
+ sort_categories : bool, default False
203
+ If true, resulting categories will be lexsorted, otherwise
204
+ they will be ordered as they appear in the data.
205
+ ignore_order : bool, default False
206
+ If true, the ordered attribute of the Categoricals will be ignored.
207
+ Results in an unordered categorical.
208
+
209
+ Returns
210
+ -------
211
+ Categorical
212
+
213
+ Raises
214
+ ------
215
+ TypeError
216
+ - all inputs do not have the same dtype
217
+ - all inputs do not have the same ordered property
218
+ - all inputs are ordered and their categories are not identical
219
+ - sort_categories=True and Categoricals are ordered
220
+ ValueError
221
+ Empty list of categoricals passed
222
+
223
+ Notes
224
+ -----
225
+ To learn more about categories, see `link
226
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#unioning>`__
227
+
228
+ Examples
229
+ --------
230
+ If you want to combine categoricals that do not necessarily have
231
+ the same categories, `union_categoricals` will combine a list-like
232
+ of categoricals. The new categories will be the union of the
233
+ categories being combined.
234
+
235
+ >>> a = pd.Categorical(["b", "c"])
236
+ >>> b = pd.Categorical(["a", "b"])
237
+ >>> pd.api.types.union_categoricals([a, b])
238
+ ['b', 'c', 'a', 'b']
239
+ Categories (3, object): ['b', 'c', 'a']
240
+
241
+ By default, the resulting categories will be ordered as they appear
242
+ in the `categories` of the data. If you want the categories to be
243
+ lexsorted, use `sort_categories=True` argument.
244
+
245
+ >>> pd.api.types.union_categoricals([a, b], sort_categories=True)
246
+ ['b', 'c', 'a', 'b']
247
+ Categories (3, object): ['a', 'b', 'c']
248
+
249
+ `union_categoricals` also works with the case of combining two
250
+ categoricals of the same categories and order information (e.g. what
251
+ you could also `append` for).
252
+
253
+ >>> a = pd.Categorical(["a", "b"], ordered=True)
254
+ >>> b = pd.Categorical(["a", "b", "a"], ordered=True)
255
+ >>> pd.api.types.union_categoricals([a, b])
256
+ ['a', 'b', 'a', 'b', 'a']
257
+ Categories (2, object): ['a' < 'b']
258
+
259
+ Raises `TypeError` because the categories are ordered and not identical.
260
+
261
+ >>> a = pd.Categorical(["a", "b"], ordered=True)
262
+ >>> b = pd.Categorical(["a", "b", "c"], ordered=True)
263
+ >>> pd.api.types.union_categoricals([a, b])
264
+ Traceback (most recent call last):
265
+ ...
266
+ TypeError: to union ordered Categoricals, all categories must be the same
267
+
268
+ Ordered categoricals with different categories or orderings can be
269
+ combined by using the `ignore_order=True` argument.
270
+
271
+ >>> a = pd.Categorical(["a", "b", "c"], ordered=True)
272
+ >>> b = pd.Categorical(["c", "b", "a"], ordered=True)
273
+ >>> pd.api.types.union_categoricals([a, b], ignore_order=True)
274
+ ['a', 'b', 'c', 'c', 'b', 'a']
275
+ Categories (3, object): ['a', 'b', 'c']
276
+
277
+ `union_categoricals` also works with a `CategoricalIndex`, or `Series`
278
+ containing categorical data, but note that the resulting array will
279
+ always be a plain `Categorical`
280
+
281
+ >>> a = pd.Series(["b", "c"], dtype='category')
282
+ >>> b = pd.Series(["a", "b"], dtype='category')
283
+ >>> pd.api.types.union_categoricals([a, b])
284
+ ['b', 'c', 'a', 'b']
285
+ Categories (3, object): ['b', 'c', 'a']
286
+ """
287
+ from pandas import Categorical
288
+ from pandas.core.arrays.categorical import recode_for_categories
289
+
290
+ if len(to_union) == 0:
291
+ raise ValueError("No Categoricals to union")
292
+
293
+ def _maybe_unwrap(x):
294
+ if isinstance(x, (ABCCategoricalIndex, ABCSeries)):
295
+ return x._values
296
+ elif isinstance(x, Categorical):
297
+ return x
298
+ else:
299
+ raise TypeError("all components to combine must be Categorical")
300
+
301
+ to_union = [_maybe_unwrap(x) for x in to_union]
302
+ first = to_union[0]
303
+
304
+ if not lib.dtypes_all_equal([obj.categories.dtype for obj in to_union]):
305
+ raise TypeError("dtype of categories must be the same")
306
+
307
+ ordered = False
308
+ if all(first._categories_match_up_to_permutation(other) for other in to_union[1:]):
309
+ # identical categories - fastpath
310
+ categories = first.categories
311
+ ordered = first.ordered
312
+
313
+ all_codes = [first._encode_with_my_categories(x)._codes for x in to_union]
314
+ new_codes = np.concatenate(all_codes)
315
+
316
+ if sort_categories and not ignore_order and ordered:
317
+ raise TypeError("Cannot use sort_categories=True with ordered Categoricals")
318
+
319
+ if sort_categories and not categories.is_monotonic_increasing:
320
+ categories = categories.sort_values()
321
+ indexer = categories.get_indexer(first.categories)
322
+
323
+ from pandas.core.algorithms import take_nd
324
+
325
+ new_codes = take_nd(indexer, new_codes, fill_value=-1)
326
+ elif ignore_order or all(not c.ordered for c in to_union):
327
+ # different categories - union and recode
328
+ cats = first.categories.append([c.categories for c in to_union[1:]])
329
+ categories = cats.unique()
330
+ if sort_categories:
331
+ categories = categories.sort_values()
332
+
333
+ new_codes = [
334
+ recode_for_categories(c.codes, c.categories, categories) for c in to_union
335
+ ]
336
+ new_codes = np.concatenate(new_codes)
337
+ else:
338
+ # ordered - to show a proper error message
339
+ if all(c.ordered for c in to_union):
340
+ msg = "to union ordered Categoricals, all categories must be the same"
341
+ raise TypeError(msg)
342
+ raise TypeError("Categorical.ordered must be the same")
343
+
344
+ if ignore_order:
345
+ ordered = False
346
+
347
+ dtype = CategoricalDtype(categories=categories, ordered=ordered)
348
+ return Categorical._simple_new(new_codes, dtype=dtype)
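The dtype-resolution rules implemented in `concat_compat` above can be seen with a short sketch. This uses the internal `pandas.core.dtypes.concat` module added in this diff purely for illustration and assumes a pandas 2.x environment:

import numpy as np
from pandas.core.dtypes.concat import concat_compat

# int and float inputs are cast to their common numpy dtype before concatenating.
out = concat_compat([np.array([1, 2]), np.array([3.0])])
print(out, out.dtype)  # [1. 2. 3.] float64

# Per GH#39817 (handled near the end of concat_compat), mixing bool with a
# numeric kind falls back to object dtype rather than silently casting the bools.
out = concat_compat([np.array([True, False]), np.array([1, 2])])
print(out.dtype)  # object

# Empty entries are currently dropped when determining the result dtype;
# concat_compat emits a FutureWarning when that changes the outcome (GH#39122).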
venv/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py ADDED
@@ -0,0 +1,2348 @@
1
+ """
2
+ Define extension dtypes.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from datetime import (
7
+ date,
8
+ datetime,
9
+ time,
10
+ timedelta,
11
+ )
12
+ from decimal import Decimal
13
+ import re
14
+ from typing import (
15
+ TYPE_CHECKING,
16
+ Any,
17
+ cast,
18
+ )
19
+ import warnings
20
+
21
+ import numpy as np
22
+ import pytz
23
+
24
+ from pandas._libs import (
25
+ lib,
26
+ missing as libmissing,
27
+ )
28
+ from pandas._libs.interval import Interval
29
+ from pandas._libs.properties import cache_readonly
30
+ from pandas._libs.tslibs import (
31
+ BaseOffset,
32
+ NaT,
33
+ NaTType,
34
+ Period,
35
+ Timedelta,
36
+ Timestamp,
37
+ timezones,
38
+ to_offset,
39
+ tz_compare,
40
+ )
41
+ from pandas._libs.tslibs.dtypes import (
42
+ PeriodDtypeBase,
43
+ abbrev_to_npy_unit,
44
+ )
45
+ from pandas._libs.tslibs.offsets import BDay
46
+ from pandas.compat import pa_version_under10p1
47
+ from pandas.errors import PerformanceWarning
48
+ from pandas.util._exceptions import find_stack_level
49
+
50
+ from pandas.core.dtypes.base import (
51
+ ExtensionDtype,
52
+ StorageExtensionDtype,
53
+ register_extension_dtype,
54
+ )
55
+ from pandas.core.dtypes.generic import (
56
+ ABCCategoricalIndex,
57
+ ABCIndex,
58
+ ABCRangeIndex,
59
+ )
60
+ from pandas.core.dtypes.inference import (
61
+ is_bool,
62
+ is_list_like,
63
+ )
64
+
65
+ from pandas.util import capitalize_first_letter
66
+
67
+ if not pa_version_under10p1:
68
+ import pyarrow as pa
69
+
70
+ if TYPE_CHECKING:
71
+ from collections.abc import MutableMapping
72
+ from datetime import tzinfo
73
+
74
+ import pyarrow as pa # noqa: TCH004
75
+
76
+ from pandas._typing import (
77
+ Dtype,
78
+ DtypeObj,
79
+ IntervalClosedType,
80
+ Ordered,
81
+ Self,
82
+ npt,
83
+ type_t,
84
+ )
85
+
86
+ from pandas import (
87
+ Categorical,
88
+ CategoricalIndex,
89
+ DatetimeIndex,
90
+ Index,
91
+ IntervalIndex,
92
+ PeriodIndex,
93
+ )
94
+ from pandas.core.arrays import (
95
+ BaseMaskedArray,
96
+ DatetimeArray,
97
+ IntervalArray,
98
+ NumpyExtensionArray,
99
+ PeriodArray,
100
+ SparseArray,
101
+ )
102
+ from pandas.core.arrays.arrow import ArrowExtensionArray
103
+
104
+ str_type = str
105
+
106
+
107
+ class PandasExtensionDtype(ExtensionDtype):
108
+ """
109
+ A np.dtype duck-typed class, suitable for holding a custom dtype.
110
+
111
+ THIS IS NOT A REAL NUMPY DTYPE
112
+ """
113
+
114
+ type: Any
115
+ kind: Any
116
+ # The Any type annotations above are here only because mypy seems to have a
117
+ # problem dealing with multiple inheritance from PandasExtensionDtype
118
+ # and ExtensionDtype's @properties in the subclasses below. The kind and
119
+ # type variables in those subclasses are explicitly typed below.
120
+ subdtype = None
121
+ str: str_type
122
+ num = 100
123
+ shape: tuple[int, ...] = ()
124
+ itemsize = 8
125
+ base: DtypeObj | None = None
126
+ isbuiltin = 0
127
+ isnative = 0
128
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
129
+
130
+ def __repr__(self) -> str_type:
131
+ """
132
+ Return a string representation for a particular object.
133
+ """
134
+ return str(self)
135
+
136
+ def __hash__(self) -> int:
137
+ raise NotImplementedError("sub-classes should implement an __hash__ method")
138
+
139
+ def __getstate__(self) -> dict[str_type, Any]:
140
+ # pickle support; we don't want to pickle the cache
141
+ return {k: getattr(self, k, None) for k in self._metadata}
142
+
143
+ @classmethod
144
+ def reset_cache(cls) -> None:
145
+ """clear the cache"""
146
+ cls._cache_dtypes = {}
147
+
148
+
149
+ class CategoricalDtypeType(type):
150
+ """
151
+ the type of CategoricalDtype, this metaclass determines subclass ability
152
+ """
153
+
154
+
155
+ @register_extension_dtype
156
+ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
157
+ """
158
+ Type for categorical data with the categories and orderedness.
159
+
160
+ Parameters
161
+ ----------
162
+ categories : sequence, optional
163
+ Must be unique, and must not contain any nulls.
164
+ The categories are stored in an Index,
165
+ and if an index is provided the dtype of that index will be used.
166
+ ordered : bool or None, default False
167
+ Whether or not this categorical is treated as an ordered categorical.
168
+ None can be used to maintain the ordered value of existing categoricals when
169
+ used in operations that combine categoricals, e.g. astype, and will resolve to
170
+ False if there is no existing ordered to maintain.
171
+
172
+ Attributes
173
+ ----------
174
+ categories
175
+ ordered
176
+
177
+ Methods
178
+ -------
179
+ None
180
+
181
+ See Also
182
+ --------
183
+ Categorical : Represent a categorical variable in classic R / S-plus fashion.
184
+
185
+ Notes
186
+ -----
187
+ This class is useful for specifying the type of a ``Categorical``
188
+ independent of the values. See :ref:`categorical.categoricaldtype`
189
+ for more.
190
+
191
+ Examples
192
+ --------
193
+ >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
194
+ >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
195
+ 0 a
196
+ 1 b
197
+ 2 a
198
+ 3 NaN
199
+ dtype: category
200
+ Categories (2, object): ['b' < 'a']
201
+
202
+ An empty CategoricalDtype with a specific dtype can be created
203
+ by providing an empty index. As follows,
204
+
205
+ >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype
206
+ dtype('<M8[ns]')
207
+ """
208
+
209
+ # TODO: Document public vs. private API
210
+ name = "category"
211
+ type: type[CategoricalDtypeType] = CategoricalDtypeType
212
+ kind: str_type = "O"
213
+ str = "|O08"
214
+ base = np.dtype("O")
215
+ _metadata = ("categories", "ordered")
216
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
217
+ _supports_2d = False
218
+ _can_fast_transpose = False
219
+
220
+ def __init__(self, categories=None, ordered: Ordered = False) -> None:
221
+ self._finalize(categories, ordered, fastpath=False)
222
+
223
+ @classmethod
224
+ def _from_fastpath(
225
+ cls, categories=None, ordered: bool | None = None
226
+ ) -> CategoricalDtype:
227
+ self = cls.__new__(cls)
228
+ self._finalize(categories, ordered, fastpath=True)
229
+ return self
230
+
231
+ @classmethod
232
+ def _from_categorical_dtype(
233
+ cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None
234
+ ) -> CategoricalDtype:
235
+ if categories is ordered is None:
236
+ return dtype
237
+ if categories is None:
238
+ categories = dtype.categories
239
+ if ordered is None:
240
+ ordered = dtype.ordered
241
+ return cls(categories, ordered)
242
+
243
+ @classmethod
244
+ def _from_values_or_dtype(
245
+ cls,
246
+ values=None,
247
+ categories=None,
248
+ ordered: bool | None = None,
249
+ dtype: Dtype | None = None,
250
+ ) -> CategoricalDtype:
251
+ """
252
+ Construct dtype from the input parameters used in :class:`Categorical`.
253
+
254
+ This constructor method specifically does not do the factorization
255
+ step, if that is needed to find the categories. This constructor may
256
+ therefore return ``CategoricalDtype(categories=None, ordered=None)``,
257
+ which may not be useful. Additional steps may therefore have to be
258
+ taken to create the final dtype.
259
+
260
+ The return dtype is specified from the inputs in this prioritized
261
+ order:
262
+ 1. if dtype is a CategoricalDtype, return dtype
263
+ 2. if dtype is the string 'category', create a CategoricalDtype from
264
+ the supplied categories and ordered parameters, and return that.
265
+ 3. if values is a categorical, use value.dtype, but override it with
266
+ categories and ordered if either/both of those are not None.
267
+ 4. if dtype is None and values is not a categorical, construct the
268
+ dtype from categories and ordered, even if either of those is None.
269
+
270
+ Parameters
271
+ ----------
272
+ values : list-like, optional
273
+ The list-like must be 1-dimensional.
274
+ categories : list-like, optional
275
+ Categories for the CategoricalDtype.
276
+ ordered : bool, optional
277
+ Designating if the categories are ordered.
278
+ dtype : CategoricalDtype or the string "category", optional
279
+ If ``CategoricalDtype``, cannot be used together with
280
+ `categories` or `ordered`.
281
+
282
+ Returns
283
+ -------
284
+ CategoricalDtype
285
+
286
+ Examples
287
+ --------
288
+ >>> pd.CategoricalDtype._from_values_or_dtype()
289
+ CategoricalDtype(categories=None, ordered=None, categories_dtype=None)
290
+ >>> pd.CategoricalDtype._from_values_or_dtype(
291
+ ... categories=['a', 'b'], ordered=True
292
+ ... )
293
+ CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)
294
+ >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)
295
+ >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)
296
+ >>> c = pd.Categorical([0, 1], dtype=dtype1)
297
+ >>> pd.CategoricalDtype._from_values_or_dtype(
298
+ ... c, ['x', 'y'], ordered=True, dtype=dtype2
299
+ ... )
300
+ Traceback (most recent call last):
301
+ ...
302
+ ValueError: Cannot specify `categories` or `ordered` together with
303
+ `dtype`.
304
+
305
+ The supplied dtype takes precedence over values' dtype:
306
+
307
+ >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)
308
+ CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object)
309
+ """
310
+
311
+ if dtype is not None:
312
+ # The dtype argument takes precedence over values.dtype (if any)
313
+ if isinstance(dtype, str):
314
+ if dtype == "category":
315
+ if ordered is None and cls.is_dtype(values):
316
+ # GH#49309 preserve orderedness
317
+ ordered = values.dtype.ordered
318
+
319
+ dtype = CategoricalDtype(categories, ordered)
320
+ else:
321
+ raise ValueError(f"Unknown dtype {repr(dtype)}")
322
+ elif categories is not None or ordered is not None:
323
+ raise ValueError(
324
+ "Cannot specify `categories` or `ordered` together with `dtype`."
325
+ )
326
+ elif not isinstance(dtype, CategoricalDtype):
327
+ raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}")
328
+ elif cls.is_dtype(values):
329
+ # If no "dtype" was passed, use the one from "values", but honor
330
+ # the "ordered" and "categories" arguments
331
+ dtype = values.dtype._from_categorical_dtype(
332
+ values.dtype, categories, ordered
333
+ )
334
+ else:
335
+ # If dtype=None and values is not categorical, create a new dtype.
336
+ # Note: This could potentially have categories=None and
337
+ # ordered=None.
338
+ dtype = CategoricalDtype(categories, ordered)
339
+
340
+ return cast(CategoricalDtype, dtype)
341
+
342
+ @classmethod
343
+ def construct_from_string(cls, string: str_type) -> CategoricalDtype:
344
+ """
345
+ Construct a CategoricalDtype from a string.
346
+
347
+ Parameters
348
+ ----------
349
+ string : str
350
+ Must be the string "category" in order to be successfully constructed.
351
+
352
+ Returns
353
+ -------
354
+ CategoricalDtype
355
+ Instance of the dtype.
356
+
357
+ Raises
358
+ ------
359
+ TypeError
360
+ If a CategoricalDtype cannot be constructed from the input.
361
+ """
362
+ if not isinstance(string, str):
363
+ raise TypeError(
364
+ f"'construct_from_string' expects a string, got {type(string)}"
365
+ )
366
+ if string != cls.name:
367
+ raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")
368
+
369
+ # need ordered=None to ensure that operations specifying dtype="category" don't
370
+ # override the ordered value for existing categoricals
371
+ return cls(ordered=None)
372
+
373
+ def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:
374
+ if ordered is not None:
375
+ self.validate_ordered(ordered)
376
+
377
+ if categories is not None:
378
+ categories = self.validate_categories(categories, fastpath=fastpath)
379
+
380
+ self._categories = categories
381
+ self._ordered = ordered
382
+
383
+ def __setstate__(self, state: MutableMapping[str_type, Any]) -> None:
384
+ # for pickle compat. __get_state__ is defined in the
385
+ # PandasExtensionDtype superclass and uses the public properties to
386
+ # pickle -> need to set the settable private ones here (see GH26067)
387
+ self._categories = state.pop("categories", None)
388
+ self._ordered = state.pop("ordered", False)
389
+
390
+ def __hash__(self) -> int:
391
+ # _hash_categories returns a uint64, so use the negative
392
+ # space for when we have unknown categories to avoid a conflict
393
+ if self.categories is None:
394
+ if self.ordered:
395
+ return -1
396
+ else:
397
+ return -2
398
+ # We *do* want to include the real self.ordered here
399
+ return int(self._hash_categories)
400
+
401
+ def __eq__(self, other: object) -> bool:
402
+ """
403
+ Rules for CDT equality:
404
+ 1) Any CDT is equal to the string 'category'
405
+ 2) Any CDT is equal to itself
406
+ 3) Any CDT is equal to a CDT with categories=None regardless of ordered
407
+ 4) A CDT with ordered=True is only equal to another CDT with
408
+ ordered=True and identical categories in the same order
409
+ 5) A CDT with ordered={False, None} is only equal to another CDT with
410
+ ordered={False, None} and identical categories, but same order is
411
+ not required. There is no distinction between False/None.
412
+ 6) Any other comparison returns False
413
+ """
414
+ if isinstance(other, str):
415
+ return other == self.name
416
+ elif other is self:
417
+ return True
418
+ elif not (hasattr(other, "ordered") and hasattr(other, "categories")):
419
+ return False
420
+ elif self.categories is None or other.categories is None:
421
+ # For non-fully-initialized dtypes, these are only equal to
422
+ # - the string "category" (handled above)
423
+ # - other CategoricalDtype with categories=None
424
+ return self.categories is other.categories
425
+ elif self.ordered or other.ordered:
426
+ # At least one has ordered=True; equal if both have ordered=True
427
+ # and the same values for categories in the same order.
428
+ return (self.ordered == other.ordered) and self.categories.equals(
429
+ other.categories
430
+ )
431
+ else:
432
+ # Neither has ordered=True; equal if both have the same categories,
433
+ # but same order is not necessary. There is no distinction between
434
+ # ordered=False and ordered=None: CDT(., False) and CDT(., None)
435
+ # will be equal if they have the same categories.
436
+ left = self.categories
437
+ right = other.categories
438
+
439
+ # GH#36280 the ordering of checks here is for performance
440
+ if not left.dtype == right.dtype:
441
+ return False
442
+
443
+ if len(left) != len(right):
444
+ return False
445
+
446
+ if self.categories.equals(other.categories):
447
+ # Check and see if they happen to be identical categories
448
+ return True
449
+
450
+ if left.dtype != object:
451
+ # Faster than calculating hash
452
+ indexer = left.get_indexer(right)
453
+ # Because left and right have the same length and are unique,
454
+ # `indexer` not having any -1s implies that there is a
455
+ # bijection between `left` and `right`.
456
+ return (indexer != -1).all()
457
+
458
+ # With object-dtype we need a comparison that identifies
459
+ # e.g. int(2) as distinct from float(2)
460
+ return set(left) == set(right)
461
+
462
+ def __repr__(self) -> str_type:
463
+ if self.categories is None:
464
+ data = "None"
465
+ dtype = "None"
466
+ else:
467
+ data = self.categories._format_data(name=type(self).__name__)
468
+ if isinstance(self.categories, ABCRangeIndex):
469
+ data = str(self.categories._range)
470
+ data = data.rstrip(", ")
471
+ dtype = self.categories.dtype
472
+
473
+ return (
474
+ f"CategoricalDtype(categories={data}, ordered={self.ordered}, "
475
+ f"categories_dtype={dtype})"
476
+ )
477
+
478
+ @cache_readonly
479
+ def _hash_categories(self) -> int:
480
+ from pandas.core.util.hashing import (
481
+ combine_hash_arrays,
482
+ hash_array,
483
+ hash_tuples,
484
+ )
485
+
486
+ categories = self.categories
487
+ ordered = self.ordered
488
+
489
+ if len(categories) and isinstance(categories[0], tuple):
490
+ # assumes if any individual category is a tuple, then all are. ATM
491
+ # I don't really want to support just some of the categories being
492
+ # tuples.
493
+ cat_list = list(categories) # breaks if a np.array of categories
494
+ cat_array = hash_tuples(cat_list)
495
+ else:
496
+ if categories.dtype == "O" and len({type(x) for x in categories}) != 1:
497
+ # TODO: hash_array doesn't handle mixed types. It casts
498
+ # everything to a str first, which means we treat
499
+ # {'1', '2'} the same as {'1', 2}
500
+ # find a better solution
501
+ hashed = hash((tuple(categories), ordered))
502
+ return hashed
503
+
504
+ if DatetimeTZDtype.is_dtype(categories.dtype):
505
+ # Avoid future warning.
506
+ categories = categories.view("datetime64[ns]")
507
+
508
+ cat_array = hash_array(np.asarray(categories), categorize=False)
509
+ if ordered:
510
+ cat_array = np.vstack(
511
+ [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]
512
+ )
513
+ else:
514
+ cat_array = np.array([cat_array])
515
+ combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array))
516
+ return np.bitwise_xor.reduce(combined_hashed)
517
+
518
+ @classmethod
519
+ def construct_array_type(cls) -> type_t[Categorical]:
520
+ """
521
+ Return the array type associated with this dtype.
522
+
523
+ Returns
524
+ -------
525
+ type
526
+ """
527
+ from pandas import Categorical
528
+
529
+ return Categorical
530
+
531
+ @staticmethod
532
+ def validate_ordered(ordered: Ordered) -> None:
533
+ """
534
+ Validates that we have a valid ordered parameter. If
535
+ it is not a boolean, a TypeError will be raised.
536
+
537
+ Parameters
538
+ ----------
539
+ ordered : object
540
+ The parameter to be verified.
541
+
542
+ Raises
543
+ ------
544
+ TypeError
545
+ If 'ordered' is not a boolean.
546
+ """
547
+ if not is_bool(ordered):
548
+ raise TypeError("'ordered' must either be 'True' or 'False'")
549
+
550
+ @staticmethod
551
+ def validate_categories(categories, fastpath: bool = False) -> Index:
552
+ """
553
+ Validates that we have good categories
554
+
555
+ Parameters
556
+ ----------
557
+ categories : array-like
558
+ fastpath : bool
559
+ Whether to skip nan and uniqueness checks
560
+
561
+ Returns
562
+ -------
563
+ categories : Index
564
+ """
565
+ from pandas.core.indexes.base import Index
566
+
567
+ if not fastpath and not is_list_like(categories):
568
+ raise TypeError(
569
+ f"Parameter 'categories' must be list-like, was {repr(categories)}"
570
+ )
571
+ if not isinstance(categories, ABCIndex):
572
+ categories = Index._with_infer(categories, tupleize_cols=False)
573
+
574
+ if not fastpath:
575
+ if categories.hasnans:
576
+ raise ValueError("Categorical categories cannot be null")
577
+
578
+ if not categories.is_unique:
579
+ raise ValueError("Categorical categories must be unique")
580
+
581
+ if isinstance(categories, ABCCategoricalIndex):
582
+ categories = categories.categories
583
+
584
+ return categories
585
+
586
+ def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:
587
+ """
588
+ Returns a CategoricalDtype with categories and ordered taken from dtype
589
+ if specified, otherwise falling back to self if unspecified
590
+
591
+ Parameters
592
+ ----------
593
+ dtype : CategoricalDtype
594
+
595
+ Returns
596
+ -------
597
+ new_dtype : CategoricalDtype
598
+ """
599
+ if isinstance(dtype, str) and dtype == "category":
600
+ # dtype='category' should not change anything
601
+ return self
602
+ elif not self.is_dtype(dtype):
603
+ raise ValueError(
604
+ f"a CategoricalDtype must be passed to perform an update, "
605
+ f"got {repr(dtype)}"
606
+ )
607
+ else:
608
+ # from here on, dtype is a CategoricalDtype
609
+ dtype = cast(CategoricalDtype, dtype)
610
+
611
+ # update categories/ordered unless they've been explicitly passed as None
612
+ new_categories = (
613
+ dtype.categories if dtype.categories is not None else self.categories
614
+ )
615
+ new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered
616
+
617
+ return CategoricalDtype(new_categories, new_ordered)
618
+
619
+ @property
620
+ def categories(self) -> Index:
621
+ """
622
+ An ``Index`` containing the unique categories allowed.
623
+
624
+ Examples
625
+ --------
626
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
627
+ >>> cat_type.categories
628
+ Index(['a', 'b'], dtype='object')
629
+ """
630
+ return self._categories
631
+
632
+ @property
633
+ def ordered(self) -> Ordered:
634
+ """
635
+ Whether the categories have an ordered relationship.
636
+
637
+ Examples
638
+ --------
639
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
640
+ >>> cat_type.ordered
641
+ True
642
+
643
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)
644
+ >>> cat_type.ordered
645
+ False
646
+ """
647
+ return self._ordered
648
+
649
+ @property
650
+ def _is_boolean(self) -> bool:
651
+ from pandas.core.dtypes.common import is_bool_dtype
652
+
653
+ return is_bool_dtype(self.categories)
654
+
655
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
656
+ # check if we have all categorical dtype with identical categories
657
+ if all(isinstance(x, CategoricalDtype) for x in dtypes):
658
+ first = dtypes[0]
659
+ if all(first == other for other in dtypes[1:]):
660
+ return first
661
+
662
+ # special case non-initialized categorical
663
+ # TODO we should figure out the expected return value in general
664
+ non_init_cats = [
665
+ isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes
666
+ ]
667
+ if all(non_init_cats):
668
+ return self
669
+ elif any(non_init_cats):
670
+ return None
671
+
672
+ # categorical is aware of Sparse -> extract sparse subdtypes
673
+ dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
674
+ # extract the categories' dtype
675
+ non_cat_dtypes = [
676
+ x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes
677
+ ]
678
+ # TODO should categorical always give an answer?
679
+ from pandas.core.dtypes.cast import find_common_type
680
+
681
+ return find_common_type(non_cat_dtypes)
682
+
683
+ @cache_readonly
684
+ def index_class(self) -> type_t[CategoricalIndex]:
685
+ from pandas import CategoricalIndex
686
+
687
+ return CategoricalIndex
688
+
689
+
690
+ @register_extension_dtype
691
+ class DatetimeTZDtype(PandasExtensionDtype):
692
+ """
693
+ An ExtensionDtype for timezone-aware datetime data.
694
+
695
+ **This is not an actual numpy dtype**, but a duck type.
696
+
697
+ Parameters
698
+ ----------
699
+ unit : str, default "ns"
700
+ The precision of the datetime data. Currently limited
701
+ to ``"ns"``.
702
+ tz : str, int, or datetime.tzinfo
703
+ The timezone.
704
+
705
+ Attributes
706
+ ----------
707
+ unit
708
+ tz
709
+
710
+ Methods
711
+ -------
712
+ None
713
+
714
+ Raises
715
+ ------
716
+ ZoneInfoNotFoundError
717
+ When the requested timezone cannot be found.
718
+
719
+ Examples
720
+ --------
721
+ >>> from zoneinfo import ZoneInfo
722
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))
723
+ datetime64[ns, UTC]
724
+
725
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))
726
+ datetime64[ns, Europe/Paris]
727
+ """
728
+
729
+ type: type[Timestamp] = Timestamp
730
+ kind: str_type = "M"
731
+ num = 101
732
+ _metadata = ("unit", "tz")
733
+ _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
734
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
735
+ _supports_2d = True
736
+ _can_fast_transpose = True
737
+
738
+ @property
739
+ def na_value(self) -> NaTType:
740
+ return NaT
741
+
742
+ @cache_readonly
743
+ def base(self) -> DtypeObj: # type: ignore[override]
744
+ return np.dtype(f"M8[{self.unit}]")
745
+
746
+ # error: Signature of "str" incompatible with supertype "PandasExtensionDtype"
747
+ @cache_readonly
748
+ def str(self) -> str: # type: ignore[override]
749
+ return f"|M8[{self.unit}]"
750
+
751
+ def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None:
752
+ if isinstance(unit, DatetimeTZDtype):
753
+ # error: "str" has no attribute "tz"
754
+ unit, tz = unit.unit, unit.tz # type: ignore[attr-defined]
755
+
756
+ if unit != "ns":
757
+ if isinstance(unit, str) and tz is None:
758
+ # maybe a string like datetime64[ns, tz], which we support for
759
+ # now.
760
+ result = type(self).construct_from_string(unit)
761
+ unit = result.unit
762
+ tz = result.tz
763
+ msg = (
764
+ f"Passing a dtype alias like 'datetime64[ns, {tz}]' "
765
+ "to DatetimeTZDtype is no longer supported. Use "
766
+ "'DatetimeTZDtype.construct_from_string()' instead."
767
+ )
768
+ raise ValueError(msg)
769
+ if unit not in ["s", "ms", "us", "ns"]:
770
+ raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units")
771
+
772
+ if tz:
773
+ tz = timezones.maybe_get_tz(tz)
774
+ tz = timezones.tz_standardize(tz)
775
+ elif tz is not None:
776
+ raise pytz.UnknownTimeZoneError(tz)
777
+ if tz is None:
778
+ raise TypeError("A 'tz' is required.")
779
+
780
+ self._unit = unit
781
+ self._tz = tz
782
+
783
+ @cache_readonly
784
+ def _creso(self) -> int:
785
+ """
786
+ The NPY_DATETIMEUNIT corresponding to this dtype's resolution.
787
+ """
788
+ return abbrev_to_npy_unit(self.unit)
789
+
790
+ @property
791
+ def unit(self) -> str_type:
792
+ """
793
+ The precision of the datetime data.
794
+
795
+ Examples
796
+ --------
797
+ >>> from zoneinfo import ZoneInfo
798
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
799
+ >>> dtype.unit
800
+ 'ns'
801
+ """
802
+ return self._unit
803
+
804
+ @property
805
+ def tz(self) -> tzinfo:
806
+ """
807
+ The timezone.
808
+
809
+ Examples
810
+ --------
811
+ >>> from zoneinfo import ZoneInfo
812
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
813
+ >>> dtype.tz
814
+ zoneinfo.ZoneInfo(key='America/Los_Angeles')
815
+ """
816
+ return self._tz
817
+
818
+ @classmethod
819
+ def construct_array_type(cls) -> type_t[DatetimeArray]:
820
+ """
821
+ Return the array type associated with this dtype.
822
+
823
+ Returns
824
+ -------
825
+ type
826
+ """
827
+ from pandas.core.arrays import DatetimeArray
828
+
829
+ return DatetimeArray
830
+
831
+ @classmethod
832
+ def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
833
+ """
834
+ Construct a DatetimeTZDtype from a string.
835
+
836
+ Parameters
837
+ ----------
838
+ string : str
839
+ The string alias for this DatetimeTZDtype.
840
+ Should be formatted like ``datetime64[ns, <tz>]``,
841
+ where ``<tz>`` is the timezone name.
842
+
843
+ Examples
844
+ --------
845
+ >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
846
+ datetime64[ns, UTC]
847
+ """
848
+ if not isinstance(string, str):
849
+ raise TypeError(
850
+ f"'construct_from_string' expects a string, got {type(string)}"
851
+ )
852
+
853
+ msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
854
+ match = cls._match.match(string)
855
+ if match:
856
+ d = match.groupdict()
857
+ try:
858
+ return cls(unit=d["unit"], tz=d["tz"])
859
+ except (KeyError, TypeError, ValueError) as err:
860
+ # KeyError if maybe_get_tz tries and fails to get a
861
+ # pytz timezone (actually pytz.UnknownTimeZoneError).
862
+ # TypeError if we pass a nonsense tz;
863
+ # ValueError if we pass a unit other than "ns"
864
+ raise TypeError(msg) from err
865
+ raise TypeError(msg)
866
+
867
+ def __str__(self) -> str_type:
868
+ return f"datetime64[{self.unit}, {self.tz}]"
869
+
870
+ @property
871
+ def name(self) -> str_type:
872
+ """A string representation of the dtype."""
873
+ return str(self)
874
+
875
+ def __hash__(self) -> int:
876
+ # make myself hashable
877
+ # TODO: update this.
878
+ return hash(str(self))
879
+
880
+ def __eq__(self, other: object) -> bool:
881
+ if isinstance(other, str):
882
+ if other.startswith("M8["):
883
+ other = f"datetime64[{other[3:]}"
884
+ return other == self.name
885
+
886
+ return (
887
+ isinstance(other, DatetimeTZDtype)
888
+ and self.unit == other.unit
889
+ and tz_compare(self.tz, other.tz)
890
+ )
891
+
892
+ def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:
893
+ """
894
+ Construct DatetimeArray from pyarrow Array/ChunkedArray.
895
+
896
+ Note: If the units in the pyarrow Array are the same as this
897
+ DatetimeDtype, then values corresponding to the integer representation
898
+ of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`)
899
+ are converted to ``NaT``, regardless of the null indicator in the
900
+ pyarrow array.
901
+
902
+ Parameters
903
+ ----------
904
+ array : pyarrow.Array or pyarrow.ChunkedArray
905
+ The Arrow array to convert to DatetimeArray.
906
+
907
+ Returns
908
+ -------
909
+ extension array : DatetimeArray
910
+ """
911
+ import pyarrow
912
+
913
+ from pandas.core.arrays import DatetimeArray
914
+
915
+ array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)
916
+
917
+ if isinstance(array, pyarrow.Array):
918
+ np_arr = array.to_numpy(zero_copy_only=False)
919
+ else:
920
+ np_arr = array.to_numpy()
921
+
922
+ return DatetimeArray._simple_new(np_arr, dtype=self)
923
+
924
+ def __setstate__(self, state) -> None:
925
+ # for pickle compat. __get_state__ is defined in the
926
+ # PandasExtensionDtype superclass and uses the public properties to
927
+ # pickle -> need to set the settable private ones here (see GH26067)
928
+ self._tz = state["tz"]
929
+ self._unit = state["unit"]
930
+
931
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
932
+ if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes):
933
+ np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]])
934
+ unit = np.datetime_data(np_dtype)[0]
935
+ return type(self)(unit=unit, tz=self.tz)
936
+ return super()._get_common_dtype(dtypes)
937
+
938
+ @cache_readonly
939
+ def index_class(self) -> type_t[DatetimeIndex]:
940
+ from pandas import DatetimeIndex
941
+
942
+ return DatetimeIndex
943
+
944
+
945
+ @register_extension_dtype
946
+ class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype):
947
+ """
948
+ An ExtensionDtype for Period data.
949
+
950
+ **This is not an actual numpy dtype**, but a duck type.
951
+
952
+ Parameters
953
+ ----------
954
+ freq : str or DateOffset
955
+ The frequency of this PeriodDtype.
956
+
957
+ Attributes
958
+ ----------
959
+ freq
960
+
961
+ Methods
962
+ -------
963
+ None
964
+
965
+ Examples
966
+ --------
967
+ >>> pd.PeriodDtype(freq='D')
968
+ period[D]
969
+
970
+ >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
971
+ period[M]
972
+ """
973
+
974
+ type: type[Period] = Period
975
+ kind: str_type = "O"
976
+ str = "|O08"
977
+ base = np.dtype("O")
978
+ num = 102
979
+ _metadata = ("freq",)
980
+ _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
981
+ # error: Incompatible types in assignment (expression has type
982
+ # "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype"
983
+ # defined the type as "Dict[str, PandasExtensionDtype]") [assignment]
984
+ _cache_dtypes: dict[BaseOffset, int] = {} # type: ignore[assignment]
985
+ __hash__ = PeriodDtypeBase.__hash__
986
+ _freq: BaseOffset
987
+ _supports_2d = True
988
+ _can_fast_transpose = True
989
+
990
+ def __new__(cls, freq) -> PeriodDtype: # noqa: PYI034
991
+ """
992
+ Parameters
993
+ ----------
994
+ freq : PeriodDtype, BaseOffset, or string
995
+ """
996
+ if isinstance(freq, PeriodDtype):
997
+ return freq
998
+
999
+ if not isinstance(freq, BaseOffset):
1000
+ freq = cls._parse_dtype_strict(freq)
1001
+
1002
+ if isinstance(freq, BDay):
1003
+ # GH#53446
1004
+ # TODO(3.0): enforcing this will close GH#10575
1005
+ warnings.warn(
1006
+ "PeriodDtype[B] is deprecated and will be removed in a future "
1007
+ "version. Use a DatetimeIndex with freq='B' instead",
1008
+ FutureWarning,
1009
+ stacklevel=find_stack_level(),
1010
+ )
1011
+
1012
+ try:
1013
+ dtype_code = cls._cache_dtypes[freq]
1014
+ except KeyError:
1015
+ dtype_code = freq._period_dtype_code
1016
+ cls._cache_dtypes[freq] = dtype_code
1017
+ u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n)
1018
+ u._freq = freq
1019
+ return u
1020
+
1021
+ def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]:
1022
+ return type(self), (self.name,)
1023
+
1024
+ @property
1025
+ def freq(self) -> BaseOffset:
1026
+ """
1027
+ The frequency object of this PeriodDtype.
1028
+
1029
+ Examples
1030
+ --------
1031
+ >>> dtype = pd.PeriodDtype(freq='D')
1032
+ >>> dtype.freq
1033
+ <Day>
1034
+ """
1035
+ return self._freq
1036
+
1037
+ @classmethod
1038
+ def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
1039
+ if isinstance(freq, str): # note: freq is already of type str!
1040
+ if freq.startswith(("Period[", "period[")):
1041
+ m = cls._match.search(freq)
1042
+ if m is not None:
1043
+ freq = m.group("freq")
1044
+
1045
+ freq_offset = to_offset(freq, is_period=True)
1046
+ if freq_offset is not None:
1047
+ return freq_offset
1048
+
1049
+ raise TypeError(
1050
+ "PeriodDtype argument should be string or BaseOffset, "
1051
+ f"got {type(freq).__name__}"
1052
+ )
1053
+
1054
+ @classmethod
1055
+ def construct_from_string(cls, string: str_type) -> PeriodDtype:
1056
+ """
1057
+ Strict construction from a string, raise a TypeError if not
1058
+ possible
1059
+ """
1060
+ if (
1061
+ isinstance(string, str)
1062
+ and (string.startswith(("period[", "Period[")))
1063
+ or isinstance(string, BaseOffset)
1064
+ ):
1065
+ # do not parse string like U as period[U]
1066
+ # avoid tuple to be regarded as freq
1067
+ try:
1068
+ return cls(freq=string)
1069
+ except ValueError:
1070
+ pass
1071
+ if isinstance(string, str):
1072
+ msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
1073
+ else:
1074
+ msg = f"'construct_from_string' expects a string, got {type(string)}"
1075
+ raise TypeError(msg)
1076
+
1077
+ def __str__(self) -> str_type:
1078
+ return self.name
1079
+
1080
+ @property
1081
+ def name(self) -> str_type:
1082
+ return f"period[{self._freqstr}]"
1083
+
1084
+ @property
1085
+ def na_value(self) -> NaTType:
1086
+ return NaT
1087
+
1088
+ def __eq__(self, other: object) -> bool:
1089
+ if isinstance(other, str):
1090
+ return other in [self.name, capitalize_first_letter(self.name)]
1091
+
1092
+ return super().__eq__(other)
1093
+
1094
+ def __ne__(self, other: object) -> bool:
1095
+ return not self.__eq__(other)
1096
+
1097
+ @classmethod
1098
+ def is_dtype(cls, dtype: object) -> bool:
1099
+ """
1100
+ Return a boolean indicating whether the passed type is an actual dtype that we
1101
+ can match (via string or type)
1102
+ """
1103
+ if isinstance(dtype, str):
1104
+ # PeriodDtype can be instantiated from freq string like "U",
1105
+ # but doesn't regard freq str like "U" as dtype.
1106
+ if dtype.startswith(("period[", "Period[")):
1107
+ try:
1108
+ return cls._parse_dtype_strict(dtype) is not None
1109
+ except ValueError:
1110
+ return False
1111
+ else:
1112
+ return False
1113
+ return super().is_dtype(dtype)
1114
+
1115
+ @classmethod
1116
+ def construct_array_type(cls) -> type_t[PeriodArray]:
1117
+ """
1118
+ Return the array type associated with this dtype.
1119
+
1120
+ Returns
1121
+ -------
1122
+ type
1123
+ """
1124
+ from pandas.core.arrays import PeriodArray
1125
+
1126
+ return PeriodArray
1127
+
1128
+ def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:
1129
+ """
1130
+ Construct PeriodArray from pyarrow Array/ChunkedArray.
1131
+ """
1132
+ import pyarrow
1133
+
1134
+ from pandas.core.arrays import PeriodArray
1135
+ from pandas.core.arrays.arrow._arrow_utils import (
1136
+ pyarrow_array_to_numpy_and_mask,
1137
+ )
1138
+
1139
+ if isinstance(array, pyarrow.Array):
1140
+ chunks = [array]
1141
+ else:
1142
+ chunks = array.chunks
1143
+
1144
+ results = []
1145
+ for arr in chunks:
1146
+ data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))
1147
+ parr = PeriodArray(data.copy(), dtype=self, copy=False)
1148
+ # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";
1149
+ # expected type "Union[int, Sequence[int], Sequence[bool], slice]"
1150
+ parr[~mask] = NaT # type: ignore[index]
1151
+ results.append(parr)
1152
+
1153
+ if not results:
1154
+ return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False)
1155
+ return PeriodArray._concat_same_type(results)
1156
+
1157
+ @cache_readonly
1158
+ def index_class(self) -> type_t[PeriodIndex]:
1159
+ from pandas import PeriodIndex
1160
+
1161
+ return PeriodIndex
1162
+
1163
+
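A minimal usage sketch of the PeriodDtype string handling above, assuming a standard pandas install; reprs are indicative:

import pandas as pd

dtype = pd.PeriodDtype("period[D]")                  # goes through _parse_dtype_strict
dtype.name                                           # 'period[D]'
dtype == "Period[D]"                                 # True, __eq__ also accepts the capitalized spelling
dtype.na_value is pd.NaT                             # True
pd.PeriodDtype.construct_from_string("period[M]")    # strict variant; bad input raises TypeError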
1164
+ @register_extension_dtype
1165
+ class IntervalDtype(PandasExtensionDtype):
1166
+ """
1167
+ An ExtensionDtype for Interval data.
1168
+
1169
+ **This is not an actual numpy dtype**, but a duck type.
1170
+
1171
+ Parameters
1172
+ ----------
1173
+ subtype : str, np.dtype
1174
+ The dtype of the Interval bounds.
1175
+
1176
+ Attributes
1177
+ ----------
1178
+ subtype
1179
+
1180
+ Methods
1181
+ -------
1182
+ None
1183
+
1184
+ Examples
1185
+ --------
1186
+ >>> pd.IntervalDtype(subtype='int64', closed='both')
1187
+ interval[int64, both]
1188
+ """
1189
+
1190
+ name = "interval"
1191
+ kind: str_type = "O"
1192
+ str = "|O08"
1193
+ base = np.dtype("O")
1194
+ num = 103
1195
+ _metadata = (
1196
+ "subtype",
1197
+ "closed",
1198
+ )
1199
+
1200
+ _match = re.compile(
1201
+ r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)"
1202
+ r"(, (?P<closed>(right|left|both|neither)))?\]"
1203
+ )
1204
+
1205
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
1206
+ _subtype: None | np.dtype
1207
+ _closed: IntervalClosedType | None
1208
+
1209
+ def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:
1210
+ from pandas.core.dtypes.common import (
1211
+ is_string_dtype,
1212
+ pandas_dtype,
1213
+ )
1214
+
1215
+ if closed is not None and closed not in {"right", "left", "both", "neither"}:
1216
+ raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")
1217
+
1218
+ if isinstance(subtype, IntervalDtype):
1219
+ if closed is not None and closed != subtype.closed:
1220
+ raise ValueError(
1221
+ "dtype.closed and 'closed' do not match. "
1222
+ "Try IntervalDtype(dtype.subtype, closed) instead."
1223
+ )
1224
+ self._subtype = subtype._subtype
1225
+ self._closed = subtype._closed
1226
+ elif subtype is None:
1227
+ # we are called as an empty constructor
1228
+ # generally for pickle compat
1229
+ self._subtype = None
1230
+ self._closed = closed
1231
+ elif isinstance(subtype, str) and subtype.lower() == "interval":
1232
+ self._subtype = None
1233
+ self._closed = closed
1234
+ else:
1235
+ if isinstance(subtype, str):
1236
+ m = IntervalDtype._match.search(subtype)
1237
+ if m is not None:
1238
+ gd = m.groupdict()
1239
+ subtype = gd["subtype"]
1240
+ if gd.get("closed", None) is not None:
1241
+ if closed is not None:
1242
+ if closed != gd["closed"]:
1243
+ raise ValueError(
1244
+ "'closed' keyword does not match value "
1245
+ "specified in dtype string"
1246
+ )
1247
+ closed = gd["closed"] # type: ignore[assignment]
1248
+
1249
+ try:
1250
+ subtype = pandas_dtype(subtype)
1251
+ except TypeError as err:
1252
+ raise TypeError("could not construct IntervalDtype") from err
1253
+ if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
1254
+ # GH 19016
1255
+ msg = (
1256
+ "category, object, and string subtypes are not supported "
1257
+ "for IntervalDtype"
1258
+ )
1259
+ raise TypeError(msg)
1260
+ self._subtype = subtype
1261
+ self._closed = closed
1262
+
1263
+ @cache_readonly
1264
+ def _can_hold_na(self) -> bool:
1265
+ subtype = self._subtype
1266
+ if subtype is None:
1267
+ # partially-initialized
1268
+ raise NotImplementedError(
1269
+ "_can_hold_na is not defined for partially-initialized IntervalDtype"
1270
+ )
1271
+ if subtype.kind in "iu":
1272
+ return False
1273
+ return True
1274
+
1275
+ @property
1276
+ def closed(self) -> IntervalClosedType:
1277
+ return self._closed # type: ignore[return-value]
1278
+
1279
+ @property
1280
+ def subtype(self):
1281
+ """
1282
+ The dtype of the Interval bounds.
1283
+
1284
+ Examples
1285
+ --------
1286
+ >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
1287
+ >>> dtype.subtype
1288
+ dtype('int64')
1289
+ """
1290
+ return self._subtype
1291
+
1292
+ @classmethod
1293
+ def construct_array_type(cls) -> type[IntervalArray]:
1294
+ """
1295
+ Return the array type associated with this dtype.
1296
+
1297
+ Returns
1298
+ -------
1299
+ type
1300
+ """
1301
+ from pandas.core.arrays import IntervalArray
1302
+
1303
+ return IntervalArray
1304
+
1305
+ @classmethod
1306
+ def construct_from_string(cls, string: str_type) -> IntervalDtype:
1307
+ """
1308
+ Attempt to construct this type from a string, raising a TypeError
1309
+ if it's not possible.
1310
+ """
1311
+ if not isinstance(string, str):
1312
+ raise TypeError(
1313
+ f"'construct_from_string' expects a string, got {type(string)}"
1314
+ )
1315
+
1316
+ if string.lower() == "interval" or cls._match.search(string) is not None:
1317
+ return cls(string)
1318
+
1319
+ msg = (
1320
+ f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n"
1321
+ "Incorrectly formatted string passed to constructor. "
1322
+ "Valid formats include Interval or Interval[dtype] "
1323
+ "where dtype is numeric, datetime, or timedelta"
1324
+ )
1325
+ raise TypeError(msg)
1326
+
1327
+ @property
1328
+ def type(self) -> type[Interval]:
1329
+ return Interval
1330
+
1331
+ def __str__(self) -> str_type:
1332
+ if self.subtype is None:
1333
+ return "interval"
1334
+ if self.closed is None:
1335
+ # Only partially initialized GH#38394
1336
+ return f"interval[{self.subtype}]"
1337
+ return f"interval[{self.subtype}, {self.closed}]"
1338
+
1339
+ def __hash__(self) -> int:
1340
+ # make myself hashable
1341
+ return hash(str(self))
1342
+
1343
+ def __eq__(self, other: object) -> bool:
1344
+ if isinstance(other, str):
1345
+ return other.lower() in (self.name.lower(), str(self).lower())
1346
+ elif not isinstance(other, IntervalDtype):
1347
+ return False
1348
+ elif self.subtype is None or other.subtype is None:
1349
+ # None should match any subtype
1350
+ return True
1351
+ elif self.closed != other.closed:
1352
+ return False
1353
+ else:
1354
+ return self.subtype == other.subtype
1355
+
1356
+ def __setstate__(self, state) -> None:
1357
+ # for pickle compat. __get_state__ is defined in the
1358
+ # PandasExtensionDtype superclass and uses the public properties to
1359
+ # pickle -> need to set the settable private ones here (see GH26067)
1360
+ self._subtype = state["subtype"]
1361
+
1362
+ # backward-compat older pickles won't have "closed" key
1363
+ self._closed = state.pop("closed", None)
1364
+
1365
+ @classmethod
1366
+ def is_dtype(cls, dtype: object) -> bool:
1367
+ """
1368
+ Return a boolean indicating whether the passed type is an actual dtype that we
1369
+ can match (via string or type).
1370
+ """
1371
+ if isinstance(dtype, str):
1372
+ if dtype.lower().startswith("interval"):
1373
+ try:
1374
+ return cls.construct_from_string(dtype) is not None
1375
+ except (ValueError, TypeError):
1376
+ return False
1377
+ else:
1378
+ return False
1379
+ return super().is_dtype(dtype)
1380
+
1381
+ def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
1382
+ """
1383
+ Construct IntervalArray from pyarrow Array/ChunkedArray.
1384
+ """
1385
+ import pyarrow
1386
+
1387
+ from pandas.core.arrays import IntervalArray
1388
+
1389
+ if isinstance(array, pyarrow.Array):
1390
+ chunks = [array]
1391
+ else:
1392
+ chunks = array.chunks
1393
+
1394
+ results = []
1395
+ for arr in chunks:
1396
+ if isinstance(arr, pyarrow.ExtensionArray):
1397
+ arr = arr.storage
1398
+ left = np.asarray(arr.field("left"), dtype=self.subtype)
1399
+ right = np.asarray(arr.field("right"), dtype=self.subtype)
1400
+ iarr = IntervalArray.from_arrays(left, right, closed=self.closed)
1401
+ results.append(iarr)
1402
+
1403
+ if not results:
1404
+ return IntervalArray.from_arrays(
1405
+ np.array([], dtype=self.subtype),
1406
+ np.array([], dtype=self.subtype),
1407
+ closed=self.closed,
1408
+ )
1409
+ return IntervalArray._concat_same_type(results)
1410
+
1411
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
1412
+ if not all(isinstance(x, IntervalDtype) for x in dtypes):
1413
+ return None
1414
+
1415
+ closed = cast("IntervalDtype", dtypes[0]).closed
1416
+ if not all(cast("IntervalDtype", x).closed == closed for x in dtypes):
1417
+ return np.dtype(object)
1418
+
1419
+ from pandas.core.dtypes.cast import find_common_type
1420
+
1421
+ common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes])
1422
+ if common == object:
1423
+ return np.dtype(object)
1424
+ return IntervalDtype(common, closed=closed)
1425
+
1426
+ @cache_readonly
1427
+ def index_class(self) -> type_t[IntervalIndex]:
1428
+ from pandas import IntervalIndex
1429
+
1430
+ return IntervalIndex
1431
+
1432
+
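A small sketch of the IntervalDtype string forms matched by _match above, assuming pandas is installed:

import pandas as pd

dtype = pd.IntervalDtype("interval[int64, both]")    # subtype and closed parsed from the string
dtype.subtype                                        # dtype('int64')
dtype.closed                                         # 'both'
str(dtype)                                           # 'interval[int64, both]'
pd.IntervalDtype("interval[datetime64[ns]]")         # the ', closed' part is optional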
1433
+ class NumpyEADtype(ExtensionDtype):
1434
+ """
1435
+ A Pandas ExtensionDtype for NumPy dtypes.
1436
+
1437
+ This is mostly for internal compatibility, and is not especially
1438
+ useful on its own.
1439
+
1440
+ Parameters
1441
+ ----------
1442
+ dtype : object
1443
+ Object to be converted to a NumPy data type object.
1444
+
1445
+ See Also
1446
+ --------
1447
+ numpy.dtype
1448
+ """
1449
+
1450
+ _metadata = ("_dtype",)
1451
+ _supports_2d = False
1452
+ _can_fast_transpose = False
1453
+
1454
+ def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None:
1455
+ if isinstance(dtype, NumpyEADtype):
1456
+ # make constructor idempotent
1457
+ dtype = dtype.numpy_dtype
1458
+ self._dtype = np.dtype(dtype)
1459
+
1460
+ def __repr__(self) -> str:
1461
+ return f"NumpyEADtype({repr(self.name)})"
1462
+
1463
+ @property
1464
+ def numpy_dtype(self) -> np.dtype:
1465
+ """
1466
+ The NumPy dtype this NumpyEADtype wraps.
1467
+ """
1468
+ return self._dtype
1469
+
1470
+ @property
1471
+ def name(self) -> str:
1472
+ """
1473
+ A bit-width name for this data-type.
1474
+ """
1475
+ return self._dtype.name
1476
+
1477
+ @property
1478
+ def type(self) -> type[np.generic]:
1479
+ """
1480
+ The type object used to instantiate a scalar of this NumPy data-type.
1481
+ """
1482
+ return self._dtype.type
1483
+
1484
+ @property
1485
+ def _is_numeric(self) -> bool:
1486
+ # exclude object, str, unicode, void.
1487
+ return self.kind in set("biufc")
1488
+
1489
+ @property
1490
+ def _is_boolean(self) -> bool:
1491
+ return self.kind == "b"
1492
+
1493
+ @classmethod
1494
+ def construct_from_string(cls, string: str) -> NumpyEADtype:
1495
+ try:
1496
+ dtype = np.dtype(string)
1497
+ except TypeError as err:
1498
+ if not isinstance(string, str):
1499
+ msg = f"'construct_from_string' expects a string, got {type(string)}"
1500
+ else:
1501
+ msg = f"Cannot construct a 'NumpyEADtype' from '{string}'"
1502
+ raise TypeError(msg) from err
1503
+ return cls(dtype)
1504
+
1505
+ @classmethod
1506
+ def construct_array_type(cls) -> type_t[NumpyExtensionArray]:
1507
+ """
1508
+ Return the array type associated with this dtype.
1509
+
1510
+ Returns
1511
+ -------
1512
+ type
1513
+ """
1514
+ from pandas.core.arrays import NumpyExtensionArray
1515
+
1516
+ return NumpyExtensionArray
1517
+
1518
+ @property
1519
+ def kind(self) -> str:
1520
+ """
1521
+ A character code (one of 'biufcmMOSUV') identifying the general kind of data.
1522
+ """
1523
+ return self._dtype.kind
1524
+
1525
+ @property
1526
+ def itemsize(self) -> int:
1527
+ """
1528
+ The element size of this data-type object.
1529
+ """
1530
+ return self._dtype.itemsize
1531
+
1532
+
1533
+ class BaseMaskedDtype(ExtensionDtype):
1534
+ """
1535
+ Base class for dtypes for BaseMaskedArray subclasses.
1536
+ """
1537
+
1538
+ base = None
1539
+ type: type
1540
+
1541
+ @property
1542
+ def na_value(self) -> libmissing.NAType:
1543
+ return libmissing.NA
1544
+
1545
+ @cache_readonly
1546
+ def numpy_dtype(self) -> np.dtype:
1547
+ """Return an instance of our numpy dtype"""
1548
+ return np.dtype(self.type)
1549
+
1550
+ @cache_readonly
1551
+ def kind(self) -> str:
1552
+ return self.numpy_dtype.kind
1553
+
1554
+ @cache_readonly
1555
+ def itemsize(self) -> int:
1556
+ """Return the number of bytes in this dtype"""
1557
+ return self.numpy_dtype.itemsize
1558
+
1559
+ @classmethod
1560
+ def construct_array_type(cls) -> type_t[BaseMaskedArray]:
1561
+ """
1562
+ Return the array type associated with this dtype.
1563
+
1564
+ Returns
1565
+ -------
1566
+ type
1567
+ """
1568
+ raise NotImplementedError
1569
+
1570
+ @classmethod
1571
+ def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype:
1572
+ """
1573
+ Construct the MaskedDtype corresponding to the given numpy dtype.
1574
+ """
1575
+ if dtype.kind == "b":
1576
+ from pandas.core.arrays.boolean import BooleanDtype
1577
+
1578
+ return BooleanDtype()
1579
+ elif dtype.kind in "iu":
1580
+ from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE
1581
+
1582
+ return NUMPY_INT_TO_DTYPE[dtype]
1583
+ elif dtype.kind == "f":
1584
+ from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE
1585
+
1586
+ return NUMPY_FLOAT_TO_DTYPE[dtype]
1587
+ else:
1588
+ raise NotImplementedError(dtype)
1589
+
1590
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
1591
+ # We unwrap any masked dtypes, find the common dtype we would use
1592
+ # for that, then re-mask the result.
1593
+ from pandas.core.dtypes.cast import find_common_type
1594
+
1595
+ new_dtype = find_common_type(
1596
+ [
1597
+ dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype
1598
+ for dtype in dtypes
1599
+ ]
1600
+ )
1601
+ if not isinstance(new_dtype, np.dtype):
1602
+ # If we ever support e.g. Masked[DatetimeArray] then this will change
1603
+ return None
1604
+ try:
1605
+ return type(self).from_numpy_dtype(new_dtype)
1606
+ except (KeyError, NotImplementedError):
1607
+ return None
1608
+
1609
+
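A brief sketch of the from_numpy_dtype mapping above; the internal import path is an assumption based on where this vendored copy defines the class:

import numpy as np
from pandas.core.dtypes.dtypes import BaseMaskedDtype  # internal module, path assumed from this file

BaseMaskedDtype.from_numpy_dtype(np.dtype("bool"))     # BooleanDtype()
BaseMaskedDtype.from_numpy_dtype(np.dtype("int64"))    # Int64Dtype()
BaseMaskedDtype.from_numpy_dtype(np.dtype("float32"))  # Float32Dtype()
# other kinds (e.g. np.dtype("M8[ns]")) raise NotImplementedError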
1610
+ @register_extension_dtype
1611
+ class SparseDtype(ExtensionDtype):
1612
+ """
1613
+ Dtype for data stored in :class:`SparseArray`.
1614
+
1615
+ This dtype implements the pandas ExtensionDtype interface.
1616
+
1617
+ Parameters
1618
+ ----------
1619
+ dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
1620
+ The dtype of the underlying array storing the non-fill value values.
1621
+ fill_value : scalar, optional
1622
+ The scalar value not stored in the SparseArray. By default, this
1623
+ depends on `dtype`.
1624
+
1625
+ =========== ==========
1626
+ dtype na_value
1627
+ =========== ==========
1628
+ float ``np.nan``
1629
+ int ``0``
1630
+ bool ``False``
1631
+ datetime64 ``pd.NaT``
1632
+ timedelta64 ``pd.NaT``
1633
+ =========== ==========
1634
+
1635
+ The default value may be overridden by specifying a `fill_value`.
1636
+
1637
+ Attributes
1638
+ ----------
1639
+ None
1640
+
1641
+ Methods
1642
+ -------
1643
+ None
1644
+
1645
+ Examples
1646
+ --------
1647
+ >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0))
1648
+ >>> ser
1649
+ 0 1
1650
+ 1 0
1651
+ 2 0
1652
+ dtype: Sparse[int64, 0]
1653
+ >>> ser.sparse.density
1654
+ 0.3333333333333333
1655
+ """
1656
+
1657
+ _is_immutable = True
1658
+
1659
+ # We include `_is_na_fill_value` in the metadata to avoid hash collisions
1660
+ # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
1661
+ # Without is_na_fill_value in the comparison, those would be equal since
1662
+ # hash(nan) is (sometimes?) 0.
1663
+ _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
1664
+
1665
+ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
1666
+ if isinstance(dtype, type(self)):
1667
+ if fill_value is None:
1668
+ fill_value = dtype.fill_value
1669
+ dtype = dtype.subtype
1670
+
1671
+ from pandas.core.dtypes.common import (
1672
+ is_string_dtype,
1673
+ pandas_dtype,
1674
+ )
1675
+ from pandas.core.dtypes.missing import na_value_for_dtype
1676
+
1677
+ dtype = pandas_dtype(dtype)
1678
+ if is_string_dtype(dtype):
1679
+ dtype = np.dtype("object")
1680
+ if not isinstance(dtype, np.dtype):
1681
+ # GH#53160
1682
+ raise TypeError("SparseDtype subtype must be a numpy dtype")
1683
+
1684
+ if fill_value is None:
1685
+ fill_value = na_value_for_dtype(dtype)
1686
+
1687
+ self._dtype = dtype
1688
+ self._fill_value = fill_value
1689
+ self._check_fill_value()
1690
+
1691
+ def __hash__(self) -> int:
1692
+ # Python3 doesn't inherit __hash__ when a base class overrides
1693
+ # __eq__, so we explicitly do it here.
1694
+ return super().__hash__()
1695
+
1696
+ def __eq__(self, other: object) -> bool:
1697
+ # We have to override __eq__ to handle NA values in _metadata.
1698
+ # The base class does simple == checks, which fail for NA.
1699
+ if isinstance(other, str):
1700
+ try:
1701
+ other = self.construct_from_string(other)
1702
+ except TypeError:
1703
+ return False
1704
+
1705
+ if isinstance(other, type(self)):
1706
+ subtype = self.subtype == other.subtype
1707
+ if self._is_na_fill_value:
1708
+ # this case is complicated by two things:
1709
+ # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
1710
+ # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
1711
+ # i.e. we want to treat any floating-point NaN as equal, but
1712
+ # not a floating-point NaN and a datetime NaT.
1713
+ fill_value = (
1714
+ other._is_na_fill_value
1715
+ and isinstance(self.fill_value, type(other.fill_value))
1716
+ or isinstance(other.fill_value, type(self.fill_value))
1717
+ )
1718
+ else:
1719
+ with warnings.catch_warnings():
1720
+ # Ignore spurious numpy warning
1721
+ warnings.filterwarnings(
1722
+ "ignore",
1723
+ "elementwise comparison failed",
1724
+ category=DeprecationWarning,
1725
+ )
1726
+
1727
+ fill_value = self.fill_value == other.fill_value
1728
+
1729
+ return subtype and fill_value
1730
+ return False
1731
+
1732
+ @property
1733
+ def fill_value(self):
1734
+ """
1735
+ The fill value of the array.
1736
+
1737
+ Converting the SparseArray to a dense ndarray will fill the
1738
+ array with this value.
1739
+
1740
+ .. warning::
1741
+
1742
+ It's possible to end up with a SparseArray that has ``fill_value``
1743
+ values in ``sp_values``. This can occur, for example, when setting
1744
+ ``SparseArray.fill_value`` directly.
1745
+ """
1746
+ return self._fill_value
1747
+
1748
+ def _check_fill_value(self) -> None:
1749
+ if not lib.is_scalar(self._fill_value):
1750
+ raise ValueError(
1751
+ f"fill_value must be a scalar. Got {self._fill_value} instead"
1752
+ )
1753
+
1754
+ from pandas.core.dtypes.cast import can_hold_element
1755
+ from pandas.core.dtypes.missing import (
1756
+ is_valid_na_for_dtype,
1757
+ isna,
1758
+ )
1759
+
1760
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
1761
+
1762
+ # GH#23124 require fill_value and subtype to match
1763
+ val = self._fill_value
1764
+ if isna(val):
1765
+ if not is_valid_na_for_dtype(val, self.subtype):
1766
+ warnings.warn(
1767
+ "Allowing arbitrary scalar fill_value in SparseDtype is "
1768
+ "deprecated. In a future version, the fill_value must be "
1769
+ "a valid value for the SparseDtype.subtype.",
1770
+ FutureWarning,
1771
+ stacklevel=find_stack_level(),
1772
+ )
1773
+ else:
1774
+ dummy = np.empty(0, dtype=self.subtype)
1775
+ dummy = ensure_wrapped_if_datetimelike(dummy)
1776
+
1777
+ if not can_hold_element(dummy, val):
1778
+ warnings.warn(
1779
+ "Allowing arbitrary scalar fill_value in SparseDtype is "
1780
+ "deprecated. In a future version, the fill_value must be "
1781
+ "a valid value for the SparseDtype.subtype.",
1782
+ FutureWarning,
1783
+ stacklevel=find_stack_level(),
1784
+ )
1785
+
1786
+ @property
1787
+ def _is_na_fill_value(self) -> bool:
1788
+ from pandas import isna
1789
+
1790
+ return isna(self.fill_value)
1791
+
1792
+ @property
1793
+ def _is_numeric(self) -> bool:
1794
+ return not self.subtype == object
1795
+
1796
+ @property
1797
+ def _is_boolean(self) -> bool:
1798
+ return self.subtype.kind == "b"
1799
+
1800
+ @property
1801
+ def kind(self) -> str:
1802
+ """
1803
+ The sparse kind. Either 'integer', or 'block'.
1804
+ """
1805
+ return self.subtype.kind
1806
+
1807
+ @property
1808
+ def type(self):
1809
+ return self.subtype.type
1810
+
1811
+ @property
1812
+ def subtype(self):
1813
+ return self._dtype
1814
+
1815
+ @property
1816
+ def name(self) -> str:
1817
+ return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"
1818
+
1819
+ def __repr__(self) -> str:
1820
+ return self.name
1821
+
1822
+ @classmethod
1823
+ def construct_array_type(cls) -> type_t[SparseArray]:
1824
+ """
1825
+ Return the array type associated with this dtype.
1826
+
1827
+ Returns
1828
+ -------
1829
+ type
1830
+ """
1831
+ from pandas.core.arrays.sparse.array import SparseArray
1832
+
1833
+ return SparseArray
1834
+
1835
+ @classmethod
1836
+ def construct_from_string(cls, string: str) -> SparseDtype:
1837
+ """
1838
+ Construct a SparseDtype from a string form.
1839
+
1840
+ Parameters
1841
+ ----------
1842
+ string : str
1843
+ Can take the following forms.
1844
+
1845
+ string dtype
1846
+ ================ ============================
1847
+ 'int' SparseDtype[np.int64, 0]
1848
+ 'Sparse' SparseDtype[np.float64, nan]
1849
+ 'Sparse[int]' SparseDtype[np.int64, 0]
1850
+ 'Sparse[int, 0]' SparseDtype[np.int64, 0]
1851
+ ================ ============================
1852
+
1853
+ It is not possible to specify non-default fill values
1854
+ with a string. An argument like ``'Sparse[int, 1]'``
1855
+ will raise a ``TypeError`` because the default fill value
1856
+ for integers is 0.
1857
+
1858
+ Returns
1859
+ -------
1860
+ SparseDtype
1861
+ """
1862
+ if not isinstance(string, str):
1863
+ raise TypeError(
1864
+ f"'construct_from_string' expects a string, got {type(string)}"
1865
+ )
1866
+ msg = f"Cannot construct a 'SparseDtype' from '{string}'"
1867
+ if string.startswith("Sparse"):
1868
+ try:
1869
+ sub_type, has_fill_value = cls._parse_subtype(string)
1870
+ except ValueError as err:
1871
+ raise TypeError(msg) from err
1872
+ else:
1873
+ result = SparseDtype(sub_type)
1874
+ msg = (
1875
+ f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
1876
+ "looks like the fill_value in the string is not "
1877
+ "the default for the dtype. Non-default fill_values "
1878
+ "are not supported. Use the 'SparseDtype()' "
1879
+ "constructor instead."
1880
+ )
1881
+ if has_fill_value and str(result) != string:
1882
+ raise TypeError(msg)
1883
+ return result
1884
+ else:
1885
+ raise TypeError(msg)
1886
+
1887
+ @staticmethod
1888
+ def _parse_subtype(dtype: str) -> tuple[str, bool]:
1889
+ """
1890
+ Parse a string to get the subtype
1891
+
1892
+ Parameters
1893
+ ----------
1894
+ dtype : str
1895
+ A string like
1896
+
1897
+ * Sparse[subtype]
1898
+ * Sparse[subtype, fill_value]
1899
+
1900
+ Returns
1901
+ -------
1902
+ subtype : str
1903
+
1904
+ Raises
1905
+ ------
1906
+ ValueError
1907
+ When the subtype cannot be extracted.
1908
+ """
1909
+ xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
1910
+ m = xpr.match(dtype)
1911
+ has_fill_value = False
1912
+ if m:
1913
+ subtype = m.groupdict()["subtype"]
1914
+ has_fill_value = bool(m.groupdict()["fill_value"])
1915
+ elif dtype == "Sparse":
1916
+ subtype = "float64"
1917
+ else:
1918
+ raise ValueError(f"Cannot parse {dtype}")
1919
+ return subtype, has_fill_value
1920
+
1921
+ @classmethod
1922
+ def is_dtype(cls, dtype: object) -> bool:
1923
+ dtype = getattr(dtype, "dtype", dtype)
1924
+ if isinstance(dtype, str) and dtype.startswith("Sparse"):
1925
+ sub_type, _ = cls._parse_subtype(dtype)
1926
+ dtype = np.dtype(sub_type)
1927
+ elif isinstance(dtype, cls):
1928
+ return True
1929
+ return isinstance(dtype, np.dtype) or dtype == "Sparse"
1930
+
1931
+ def update_dtype(self, dtype) -> SparseDtype:
1932
+ """
1933
+ Convert the SparseDtype to a new dtype.
1934
+
1935
+ This takes care of converting the ``fill_value``.
1936
+
1937
+ Parameters
1938
+ ----------
1939
+ dtype : Union[str, numpy.dtype, SparseDtype]
1940
+ The new dtype to use.
1941
+
1942
+ * For a SparseDtype, it is simply returned
1943
+ * For a NumPy dtype (or str), the current fill value
1944
+ is converted to the new dtype, and a SparseDtype
1945
+ with `dtype` and the new fill value is returned.
1946
+
1947
+ Returns
1948
+ -------
1949
+ SparseDtype
1950
+ A new SparseDtype with the correct `dtype` and fill value
1951
+ for that `dtype`.
1952
+
1953
+ Raises
1954
+ ------
1955
+ ValueError
1956
+ When the current fill value cannot be converted to the
1957
+ new `dtype` (e.g. trying to convert ``np.nan`` to an
1958
+ integer dtype).
1959
+
1960
+
1961
+ Examples
1962
+ --------
1963
+ >>> SparseDtype(int, 0).update_dtype(float)
1964
+ Sparse[float64, 0.0]
1965
+
1966
+ >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
1967
+ Sparse[float64, nan]
1968
+ """
1969
+ from pandas.core.dtypes.astype import astype_array
1970
+ from pandas.core.dtypes.common import pandas_dtype
1971
+
1972
+ cls = type(self)
1973
+ dtype = pandas_dtype(dtype)
1974
+
1975
+ if not isinstance(dtype, cls):
1976
+ if not isinstance(dtype, np.dtype):
1977
+ raise TypeError("sparse arrays of extension dtypes not supported")
1978
+
1979
+ fv_asarray = np.atleast_1d(np.array(self.fill_value))
1980
+ fvarr = astype_array(fv_asarray, dtype)
1981
+ # NB: not fv_0d.item(), as that casts dt64->int
1982
+ fill_value = fvarr[0]
1983
+ dtype = cls(dtype, fill_value=fill_value)
1984
+
1985
+ return dtype
1986
+
1987
+ @property
1988
+ def _subtype_with_str(self):
1989
+ """
1990
+ Whether the SparseDtype's subtype should be considered ``str``.
1991
+
1992
+ Typically, pandas will store string data in an object-dtype array.
1993
+ When converting values to a dtype, e.g. in ``.astype``, we need to
1994
+ be more specific: we need the actual underlying type.
1995
+
1996
+ Returns
1997
+ -------
1998
+ >>> SparseDtype(int, 1)._subtype_with_str
1999
+ dtype('int64')
2000
+
2001
+ >>> SparseDtype(object, 1)._subtype_with_str
2002
+ dtype('O')
2003
+
2004
+ >>> dtype = SparseDtype(str, '')
2005
+ >>> dtype.subtype
2006
+ dtype('O')
2007
+
2008
+ >>> dtype._subtype_with_str
2009
+ <class 'str'>
2010
+ """
2011
+ if isinstance(self.fill_value, str):
2012
+ return type(self.fill_value)
2013
+ return self.subtype
2014
+
2015
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
2016
+ # TODO for now only handle SparseDtypes and numpy dtypes => extend
2017
+ # with other compatible extension dtypes
2018
+ from pandas.core.dtypes.cast import np_find_common_type
2019
+
2020
+ if any(
2021
+ isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
2022
+ for x in dtypes
2023
+ ):
2024
+ return None
2025
+
2026
+ fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
2027
+ fill_value = fill_values[0]
2028
+
2029
+ from pandas import isna
2030
+
2031
+ # np.nan isn't a singleton, so we may end up with multiple
2032
+ # NaNs here, so we ignore the all NA case too.
2033
+ if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
2034
+ warnings.warn(
2035
+ "Concatenating sparse arrays with multiple fill "
2036
+ f"values: '{fill_values}'. Picking the first and "
2037
+ "converting the rest.",
2038
+ PerformanceWarning,
2039
+ stacklevel=find_stack_level(),
2040
+ )
2041
+
2042
+ np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes)
2043
+ return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value)
2044
+
2045
+
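A short sketch of the SparseDtype string constructor and update_dtype behaviour shown above, assuming pandas and numpy are installed:

import numpy as np
import pandas as pd

pd.SparseDtype.construct_from_string("Sparse[int]")                  # Sparse[int64, 0]
pd.SparseDtype(int, 0).update_dtype(float)                           # Sparse[float64, 0.0]
pd.SparseDtype(int, 1).update_dtype(pd.SparseDtype(float, np.nan))   # Sparse[float64, nan]
# "Sparse[int, 1]" raises TypeError: non-default fill values cannot be spelled in the string form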
2046
+ @register_extension_dtype
2047
+ class ArrowDtype(StorageExtensionDtype):
2048
+ """
2049
+ An ExtensionDtype for PyArrow data types.
2050
+
2051
+ .. warning::
2052
+
2053
+ ArrowDtype is considered experimental. The implementation and
2054
+ parts of the API may change without warning.
2055
+
2056
+ While most ``dtype`` arguments can accept the "string"
2057
+ constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful
2058
+ if the data type contains parameters like ``pyarrow.timestamp``.
2059
+
2060
+ Parameters
2061
+ ----------
2062
+ pyarrow_dtype : pa.DataType
2063
+ An instance of a `pyarrow.DataType <https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions>`__.
2064
+
2065
+ Attributes
2066
+ ----------
2067
+ pyarrow_dtype
2068
+
2069
+ Methods
2070
+ -------
2071
+ None
2072
+
2073
+ Returns
2074
+ -------
2075
+ ArrowDtype
2076
+
2077
+ Examples
2078
+ --------
2079
+ >>> import pyarrow as pa
2080
+ >>> pd.ArrowDtype(pa.int64())
2081
+ int64[pyarrow]
2082
+
2083
+ Types with parameters must be constructed with ArrowDtype.
2084
+
2085
+ >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))
2086
+ timestamp[s, tz=America/New_York][pyarrow]
2087
+ >>> pd.ArrowDtype(pa.list_(pa.int64()))
2088
+ list<item: int64>[pyarrow]
2089
+ """
2090
+
2091
+ _metadata = ("storage", "pyarrow_dtype") # type: ignore[assignment]
2092
+
2093
+ def __init__(self, pyarrow_dtype: pa.DataType) -> None:
2094
+ super().__init__("pyarrow")
2095
+ if pa_version_under10p1:
2096
+ raise ImportError("pyarrow>=10.0.1 is required for ArrowDtype")
2097
+ if not isinstance(pyarrow_dtype, pa.DataType):
2098
+ raise ValueError(
2099
+ f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "
2100
+ f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead."
2101
+ )
2102
+ self.pyarrow_dtype = pyarrow_dtype
2103
+
2104
+ def __repr__(self) -> str:
2105
+ return self.name
2106
+
2107
+ def __hash__(self) -> int:
2108
+ # make myself hashable
2109
+ return hash(str(self))
2110
+
2111
+ def __eq__(self, other: object) -> bool:
2112
+ if not isinstance(other, type(self)):
2113
+ return super().__eq__(other)
2114
+ return self.pyarrow_dtype == other.pyarrow_dtype
2115
+
2116
+ @property
2117
+ def type(self):
2118
+ """
2119
+ Returns associated scalar type.
2120
+ """
2121
+ pa_type = self.pyarrow_dtype
2122
+ if pa.types.is_integer(pa_type):
2123
+ return int
2124
+ elif pa.types.is_floating(pa_type):
2125
+ return float
2126
+ elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type):
2127
+ return str
2128
+ elif (
2129
+ pa.types.is_binary(pa_type)
2130
+ or pa.types.is_fixed_size_binary(pa_type)
2131
+ or pa.types.is_large_binary(pa_type)
2132
+ ):
2133
+ return bytes
2134
+ elif pa.types.is_boolean(pa_type):
2135
+ return bool
2136
+ elif pa.types.is_duration(pa_type):
2137
+ if pa_type.unit == "ns":
2138
+ return Timedelta
2139
+ else:
2140
+ return timedelta
2141
+ elif pa.types.is_timestamp(pa_type):
2142
+ if pa_type.unit == "ns":
2143
+ return Timestamp
2144
+ else:
2145
+ return datetime
2146
+ elif pa.types.is_date(pa_type):
2147
+ return date
2148
+ elif pa.types.is_time(pa_type):
2149
+ return time
2150
+ elif pa.types.is_decimal(pa_type):
2151
+ return Decimal
2152
+ elif pa.types.is_dictionary(pa_type):
2153
+ # TODO: Potentially change this & CategoricalDtype.type to
2154
+ # something more representative of the scalar
2155
+ return CategoricalDtypeType
2156
+ elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type):
2157
+ return list
2158
+ elif pa.types.is_fixed_size_list(pa_type):
2159
+ return list
2160
+ elif pa.types.is_map(pa_type):
2161
+ return list
2162
+ elif pa.types.is_struct(pa_type):
2163
+ return dict
2164
+ elif pa.types.is_null(pa_type):
2165
+ # TODO: None? pd.NA? pa.null?
2166
+ return type(pa_type)
2167
+ elif isinstance(pa_type, pa.ExtensionType):
2168
+ return type(self)(pa_type.storage_type).type
2169
+ raise NotImplementedError(pa_type)
2170
+
2171
+ @property
2172
+ def name(self) -> str: # type: ignore[override]
2173
+ """
2174
+ A string identifying the data type.
2175
+ """
2176
+ return f"{str(self.pyarrow_dtype)}[{self.storage}]"
2177
+
2178
+ @cache_readonly
2179
+ def numpy_dtype(self) -> np.dtype:
2180
+ """Return an instance of the related numpy dtype"""
2181
+ if pa.types.is_timestamp(self.pyarrow_dtype):
2182
+ # pa.timestamp(unit).to_pandas_dtype() returns ns units
2183
+ # regardless of the pyarrow timestamp units.
2184
+ # This can be removed if/when pyarrow addresses it:
2185
+ # https://github.com/apache/arrow/issues/34462
2186
+ return np.dtype(f"datetime64[{self.pyarrow_dtype.unit}]")
2187
+ if pa.types.is_duration(self.pyarrow_dtype):
2188
+ # pa.duration(unit).to_pandas_dtype() returns ns units
2189
+ # regardless of the pyarrow duration units
2190
+ # This can be removed if/when pyarrow addresses it:
2191
+ # https://github.com/apache/arrow/issues/34462
2192
+ return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]")
2193
+ if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(
2194
+ self.pyarrow_dtype
2195
+ ):
2196
+ # pa.string().to_pandas_dtype() = object which we don't want
2197
+ return np.dtype(str)
2198
+ try:
2199
+ return np.dtype(self.pyarrow_dtype.to_pandas_dtype())
2200
+ except (NotImplementedError, TypeError):
2201
+ return np.dtype(object)
2202
+
2203
+ @cache_readonly
2204
+ def kind(self) -> str:
2205
+ if pa.types.is_timestamp(self.pyarrow_dtype):
2206
+ # To mirror DatetimeTZDtype
2207
+ return "M"
2208
+ return self.numpy_dtype.kind
2209
+
2210
+ @cache_readonly
2211
+ def itemsize(self) -> int:
2212
+ """Return the number of bytes in this dtype"""
2213
+ return self.numpy_dtype.itemsize
2214
+
2215
+ @classmethod
2216
+ def construct_array_type(cls) -> type_t[ArrowExtensionArray]:
2217
+ """
2218
+ Return the array type associated with this dtype.
2219
+
2220
+ Returns
2221
+ -------
2222
+ type
2223
+ """
2224
+ from pandas.core.arrays.arrow import ArrowExtensionArray
2225
+
2226
+ return ArrowExtensionArray
2227
+
2228
+ @classmethod
2229
+ def construct_from_string(cls, string: str) -> ArrowDtype:
2230
+ """
2231
+ Construct this type from a string.
2232
+
2233
+ Parameters
2234
+ ----------
2235
+ string : str
2236
+ string should follow the format f"{pyarrow_type}[pyarrow]"
2237
+ e.g. int64[pyarrow]
2238
+ """
2239
+ if not isinstance(string, str):
2240
+ raise TypeError(
2241
+ f"'construct_from_string' expects a string, got {type(string)}"
2242
+ )
2243
+ if not string.endswith("[pyarrow]"):
2244
+ raise TypeError(f"'{string}' must end with '[pyarrow]'")
2245
+ if string == "string[pyarrow]":
2246
+ # Ensure Registry.find skips ArrowDtype to use StringDtype instead
2247
+ raise TypeError("string[pyarrow] should be constructed by StringDtype")
2248
+
2249
+ base_type = string[:-9] # get rid of "[pyarrow]"
2250
+ try:
2251
+ pa_dtype = pa.type_for_alias(base_type)
2252
+ except ValueError as err:
2253
+ has_parameters = re.search(r"[\[\(].*[\]\)]", base_type)
2254
+ if has_parameters:
2255
+ # Fallback to try common temporal types
2256
+ try:
2257
+ return cls._parse_temporal_dtype_string(base_type)
2258
+ except (NotImplementedError, ValueError):
2259
+ # Fall through to raise with nice exception message below
2260
+ pass
2261
+
2262
+ raise NotImplementedError(
2263
+ "Passing pyarrow type specific parameters "
2264
+ f"({has_parameters.group()}) in the string is not supported. "
2265
+ "Please construct an ArrowDtype object with a pyarrow_dtype "
2266
+ "instance with specific parameters."
2267
+ ) from err
2268
+ raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err
2269
+ return cls(pa_dtype)
2270
+
2271
+ # TODO(arrow#33642): This can be removed once supported by pyarrow
2272
+ @classmethod
2273
+ def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype:
2274
+ """
2275
+ Construct a temporal ArrowDtype from string.
2276
+ """
2277
+ # we assume
2278
+ # 1) "[pyarrow]" has already been stripped from the end of our string.
2279
+ # 2) we know "[" is present
2280
+ head, tail = string.split("[", 1)
2281
+
2282
+ if not tail.endswith("]"):
2283
+ raise ValueError
2284
+ tail = tail[:-1]
2285
+
2286
+ if head == "timestamp":
2287
+ assert "," in tail # otherwise type_for_alias should work
2288
+ unit, tz = tail.split(",", 1)
2289
+ unit = unit.strip()
2290
+ tz = tz.strip()
2291
+ if tz.startswith("tz="):
2292
+ tz = tz[3:]
2293
+
2294
+ pa_type = pa.timestamp(unit, tz=tz)
2295
+ dtype = cls(pa_type)
2296
+ return dtype
2297
+
2298
+ raise NotImplementedError(string)
2299
+
2300
+ @property
2301
+ def _is_numeric(self) -> bool:
2302
+ """
2303
+ Whether columns with this dtype should be considered numeric.
2304
+ """
2305
+ # TODO: pa.types.is_boolean?
2306
+ return (
2307
+ pa.types.is_integer(self.pyarrow_dtype)
2308
+ or pa.types.is_floating(self.pyarrow_dtype)
2309
+ or pa.types.is_decimal(self.pyarrow_dtype)
2310
+ )
2311
+
2312
+ @property
2313
+ def _is_boolean(self) -> bool:
2314
+ """
2315
+ Whether this dtype should be considered boolean.
2316
+ """
2317
+ return pa.types.is_boolean(self.pyarrow_dtype)
2318
+
2319
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
2320
+ # We unwrap any masked dtypes, find the common dtype we would use
2321
+ # for that, then re-mask the result.
2322
+ # Mirrors BaseMaskedDtype
2323
+ from pandas.core.dtypes.cast import find_common_type
2324
+
2325
+ null_dtype = type(self)(pa.null())
2326
+
2327
+ new_dtype = find_common_type(
2328
+ [
2329
+ dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype
2330
+ for dtype in dtypes
2331
+ if dtype != null_dtype
2332
+ ]
2333
+ )
2334
+ if not isinstance(new_dtype, np.dtype):
2335
+ return None
2336
+ try:
2337
+ pa_dtype = pa.from_numpy_dtype(new_dtype)
2338
+ return type(self)(pa_dtype)
2339
+ except NotImplementedError:
2340
+ return None
2341
+
2342
+ def __from_arrow__(self, array: pa.Array | pa.ChunkedArray):
2343
+ """
2344
+ Construct ArrowExtensionArray from pyarrow Array/ChunkedArray.
2345
+ """
2346
+ array_class = self.construct_array_type()
2347
+ arr = array.cast(self.pyarrow_dtype, safe=True)
2348
+ return array_class(arr)
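A minimal sketch of the ArrowDtype constructors above, assuming pyarrow >= 10.0.1 is installed:

import pyarrow as pa
import pandas as pd

pd.ArrowDtype.construct_from_string("int64[pyarrow]")                  # alias resolved via pa.type_for_alias
pd.ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]")   # handled by _parse_temporal_dtype_string
pd.ArrowDtype(pa.list_(pa.int64()))                                    # parametrized types need the constructor
# "string[pyarrow]" is deliberately rejected here so StringDtype handles it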
venv/lib/python3.10/site-packages/pandas/core/dtypes/generic.py ADDED
@@ -0,0 +1,147 @@
1
+ """ define generic base classes for pandas objects """
2
+ from __future__ import annotations
3
+
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Type,
7
+ cast,
8
+ )
9
+
10
+ if TYPE_CHECKING:
11
+ from pandas import (
12
+ Categorical,
13
+ CategoricalIndex,
14
+ DataFrame,
15
+ DatetimeIndex,
16
+ Index,
17
+ IntervalIndex,
18
+ MultiIndex,
19
+ PeriodIndex,
20
+ RangeIndex,
21
+ Series,
22
+ TimedeltaIndex,
23
+ )
24
+ from pandas.core.arrays import (
25
+ DatetimeArray,
26
+ ExtensionArray,
27
+ NumpyExtensionArray,
28
+ PeriodArray,
29
+ TimedeltaArray,
30
+ )
31
+ from pandas.core.generic import NDFrame
32
+
33
+
34
+ # define abstract base classes to enable isinstance type checking on our
35
+ # objects
36
+ def create_pandas_abc_type(name, attr, comp):
37
+ def _check(inst) -> bool:
38
+ return getattr(inst, attr, "_typ") in comp
39
+
40
+ # https://github.com/python/mypy/issues/1006
41
+ # error: 'classmethod' used with a non-method
42
+ @classmethod # type: ignore[misc]
43
+ def _instancecheck(cls, inst) -> bool:
44
+ return _check(inst) and not isinstance(inst, type)
45
+
46
+ @classmethod # type: ignore[misc]
47
+ def _subclasscheck(cls, inst) -> bool:
48
+ # Raise instead of returning False
49
+ # This is consistent with default __subclasscheck__ behavior
50
+ if not isinstance(inst, type):
51
+ raise TypeError("issubclass() arg 1 must be a class")
52
+
53
+ return _check(inst)
54
+
55
+ dct = {"__instancecheck__": _instancecheck, "__subclasscheck__": _subclasscheck}
56
+ meta = type("ABCBase", (type,), dct)
57
+ return meta(name, (), dct)
58
+
59
+
60
+ ABCRangeIndex = cast(
61
+ "Type[RangeIndex]",
62
+ create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",)),
63
+ )
64
+ ABCMultiIndex = cast(
65
+ "Type[MultiIndex]",
66
+ create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)),
67
+ )
68
+ ABCDatetimeIndex = cast(
69
+ "Type[DatetimeIndex]",
70
+ create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)),
71
+ )
72
+ ABCTimedeltaIndex = cast(
73
+ "Type[TimedeltaIndex]",
74
+ create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)),
75
+ )
76
+ ABCPeriodIndex = cast(
77
+ "Type[PeriodIndex]",
78
+ create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)),
79
+ )
80
+ ABCCategoricalIndex = cast(
81
+ "Type[CategoricalIndex]",
82
+ create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)),
83
+ )
84
+ ABCIntervalIndex = cast(
85
+ "Type[IntervalIndex]",
86
+ create_pandas_abc_type("ABCIntervalIndex", "_typ", ("intervalindex",)),
87
+ )
88
+ ABCIndex = cast(
89
+ "Type[Index]",
90
+ create_pandas_abc_type(
91
+ "ABCIndex",
92
+ "_typ",
93
+ {
94
+ "index",
95
+ "rangeindex",
96
+ "multiindex",
97
+ "datetimeindex",
98
+ "timedeltaindex",
99
+ "periodindex",
100
+ "categoricalindex",
101
+ "intervalindex",
102
+ },
103
+ ),
104
+ )
105
+
106
+
107
+ ABCNDFrame = cast(
108
+ "Type[NDFrame]",
109
+ create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")),
110
+ )
111
+ ABCSeries = cast(
112
+ "Type[Series]",
113
+ create_pandas_abc_type("ABCSeries", "_typ", ("series",)),
114
+ )
115
+ ABCDataFrame = cast(
116
+ "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
117
+ )
118
+
119
+ ABCCategorical = cast(
120
+ "Type[Categorical]",
121
+ create_pandas_abc_type("ABCCategorical", "_typ", ("categorical")),
122
+ )
123
+ ABCDatetimeArray = cast(
124
+ "Type[DatetimeArray]",
125
+ create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray")),
126
+ )
127
+ ABCTimedeltaArray = cast(
128
+ "Type[TimedeltaArray]",
129
+ create_pandas_abc_type("ABCTimedeltaArray", "_typ", ("timedeltaarray")),
130
+ )
131
+ ABCPeriodArray = cast(
132
+ "Type[PeriodArray]",
133
+ create_pandas_abc_type("ABCPeriodArray", "_typ", ("periodarray",)),
134
+ )
135
+ ABCExtensionArray = cast(
136
+ "Type[ExtensionArray]",
137
+ create_pandas_abc_type(
138
+ "ABCExtensionArray",
139
+ "_typ",
140
+ # Note: IntervalArray and SparseArray are included bc they have _typ="extension"
141
+ {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"},
142
+ ),
143
+ )
144
+ ABCNumpyExtensionArray = cast(
145
+ "Type[NumpyExtensionArray]",
146
+ create_pandas_abc_type("ABCNumpyExtensionArray", "_typ", ("npy_extension",)),
147
+ )
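A quick sketch of how the ABC types above are used for isinstance checks without importing the concrete classes, assuming pandas is installed:

import pandas as pd
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCSeries

isinstance(pd.Series([1, 2]), ABCSeries)             # True, via the _typ attribute check
isinstance(pd.DataFrame({"a": [1]}), ABCDataFrame)   # True
isinstance(pd.RangeIndex(3), ABCIndex)               # True: every index flavor is covered
isinstance(pd.Series, ABCSeries)                     # False: classes themselves are excluded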
venv/lib/python3.10/site-packages/pandas/core/dtypes/inference.py ADDED
@@ -0,0 +1,437 @@
1
+ """ basic inference routines """
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections import abc
6
+ from numbers import Number
7
+ import re
8
+ from re import Pattern
9
+ from typing import TYPE_CHECKING
10
+
11
+ import numpy as np
12
+
13
+ from pandas._libs import lib
14
+
15
+ if TYPE_CHECKING:
16
+ from collections.abc import Hashable
17
+
18
+ from pandas._typing import TypeGuard
19
+
20
+ is_bool = lib.is_bool
21
+
22
+ is_integer = lib.is_integer
23
+
24
+ is_float = lib.is_float
25
+
26
+ is_complex = lib.is_complex
27
+
28
+ is_scalar = lib.is_scalar
29
+
30
+ is_decimal = lib.is_decimal
31
+
32
+ is_interval = lib.is_interval
33
+
34
+ is_list_like = lib.is_list_like
35
+
36
+ is_iterator = lib.is_iterator
37
+
38
+
39
+ def is_number(obj) -> TypeGuard[Number | np.number]:
40
+ """
41
+ Check if the object is a number.
42
+
43
+ Returns True when the object is a number, and False if is not.
44
+
45
+ Parameters
46
+ ----------
47
+ obj : any type
48
+ The object to check if is a number.
49
+
50
+ Returns
51
+ -------
52
+ bool
53
+ Whether `obj` is a number or not.
54
+
55
+ See Also
56
+ --------
57
+ api.types.is_integer: Checks a subgroup of numbers.
58
+
59
+ Examples
60
+ --------
61
+ >>> from pandas.api.types import is_number
62
+ >>> is_number(1)
63
+ True
64
+ >>> is_number(7.15)
65
+ True
66
+
67
+ Booleans are valid because they are int subclass.
68
+
69
+ >>> is_number(False)
70
+ True
71
+
72
+ >>> is_number("foo")
73
+ False
74
+ >>> is_number("5")
75
+ False
76
+ """
77
+ return isinstance(obj, (Number, np.number))
78
+
79
+
80
+ def iterable_not_string(obj) -> bool:
81
+ """
82
+ Check if the object is an iterable but not a string.
83
+
84
+ Parameters
85
+ ----------
86
+ obj : The object to check.
87
+
88
+ Returns
89
+ -------
90
+ is_iter_not_string : bool
91
+ Whether `obj` is a non-string iterable.
92
+
93
+ Examples
94
+ --------
95
+ >>> iterable_not_string([1, 2, 3])
96
+ True
97
+ >>> iterable_not_string("foo")
98
+ False
99
+ >>> iterable_not_string(1)
100
+ False
101
+ """
102
+ return isinstance(obj, abc.Iterable) and not isinstance(obj, str)
103
+
104
+
105
+ def is_file_like(obj) -> bool:
106
+ """
107
+ Check if the object is a file-like object.
108
+
109
+ For objects to be considered file-like, they must
110
+ be an iterator AND have a `read` and/or `write`
111
+ method as an attribute.
112
+
113
+ Note: file-like objects must be iterable, but
114
+ iterable objects need not be file-like.
115
+
116
+ Parameters
117
+ ----------
118
+ obj : The object to check
119
+
120
+ Returns
121
+ -------
122
+ bool
123
+ Whether `obj` has file-like properties.
124
+
125
+ Examples
126
+ --------
127
+ >>> import io
128
+ >>> from pandas.api.types import is_file_like
129
+ >>> buffer = io.StringIO("data")
130
+ >>> is_file_like(buffer)
131
+ True
132
+ >>> is_file_like([1, 2, 3])
133
+ False
134
+ """
135
+ if not (hasattr(obj, "read") or hasattr(obj, "write")):
136
+ return False
137
+
138
+ return bool(hasattr(obj, "__iter__"))
139
+
140
+
141
+ def is_re(obj) -> TypeGuard[Pattern]:
142
+ """
143
+ Check if the object is a regex pattern instance.
144
+
145
+ Parameters
146
+ ----------
147
+ obj : The object to check
148
+
149
+ Returns
150
+ -------
151
+ bool
152
+ Whether `obj` is a regex pattern.
153
+
154
+ Examples
155
+ --------
156
+ >>> from pandas.api.types import is_re
157
+ >>> import re
158
+ >>> is_re(re.compile(".*"))
159
+ True
160
+ >>> is_re("foo")
161
+ False
162
+ """
163
+ return isinstance(obj, Pattern)
164
+
165
+
166
+ def is_re_compilable(obj) -> bool:
167
+ """
168
+ Check if the object can be compiled into a regex pattern instance.
169
+
170
+ Parameters
171
+ ----------
172
+ obj : The object to check
173
+
174
+ Returns
175
+ -------
176
+ bool
177
+ Whether `obj` can be compiled as a regex pattern.
178
+
179
+ Examples
180
+ --------
181
+ >>> from pandas.api.types import is_re_compilable
182
+ >>> is_re_compilable(".*")
183
+ True
184
+ >>> is_re_compilable(1)
185
+ False
186
+ """
187
+ try:
188
+ re.compile(obj)
189
+ except TypeError:
190
+ return False
191
+ else:
192
+ return True
193
+
194
+
195
+ def is_array_like(obj) -> bool:
196
+ """
197
+ Check if the object is array-like.
198
+
199
+ For an object to be considered array-like, it must be list-like and
200
+ have a `dtype` attribute.
201
+
202
+ Parameters
203
+ ----------
204
+ obj : The object to check
205
+
206
+ Returns
207
+ -------
208
+ is_array_like : bool
209
+ Whether `obj` has array-like properties.
210
+
211
+ Examples
212
+ --------
213
+ >>> is_array_like(np.array([1, 2, 3]))
214
+ True
215
+ >>> is_array_like(pd.Series(["a", "b"]))
216
+ True
217
+ >>> is_array_like(pd.Index(["2016-01-01"]))
218
+ True
219
+ >>> is_array_like([1, 2, 3])
220
+ False
221
+ >>> is_array_like(("a", "b"))
222
+ False
223
+ """
224
+ return is_list_like(obj) and hasattr(obj, "dtype")
225
+
226
+
227
+ def is_nested_list_like(obj) -> bool:
228
+ """
229
+ Check if the object is list-like, and that all of its elements
230
+ are also list-like.
231
+
232
+ Parameters
233
+ ----------
234
+ obj : The object to check
235
+
236
+ Returns
237
+ -------
238
+ is_list_like : bool
239
+ Whether `obj` has list-like properties.
240
+
241
+ Examples
242
+ --------
243
+ >>> is_nested_list_like([[1, 2, 3]])
244
+ True
245
+ >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
246
+ True
247
+ >>> is_nested_list_like(["foo"])
248
+ False
249
+ >>> is_nested_list_like([])
250
+ False
251
+ >>> is_nested_list_like([[1, 2, 3], 1])
252
+ False
253
+
254
+ Notes
255
+ -----
256
+ This won't reliably detect whether a consumable iterator (e.g.
257
+ a generator) is a nested-list-like without consuming the iterator.
258
+ To avoid consuming it, we always return False if the outer container
259
+ doesn't define `__len__`.
260
+
261
+ See Also
262
+ --------
263
+ is_list_like
264
+ """
265
+ return (
266
+ is_list_like(obj)
267
+ and hasattr(obj, "__len__")
268
+ and len(obj) > 0
269
+ and all(is_list_like(item) for item in obj)
270
+ )
271
+
272
+
273
+ def is_dict_like(obj) -> bool:
274
+ """
275
+ Check if the object is dict-like.
276
+
277
+ Parameters
278
+ ----------
279
+ obj : The object to check
280
+
281
+ Returns
282
+ -------
283
+ bool
284
+ Whether `obj` has dict-like properties.
285
+
286
+ Examples
287
+ --------
288
+ >>> from pandas.api.types import is_dict_like
289
+ >>> is_dict_like({1: 2})
290
+ True
291
+ >>> is_dict_like([1, 2, 3])
292
+ False
293
+ >>> is_dict_like(dict)
294
+ False
295
+ >>> is_dict_like(dict())
296
+ True
297
+ """
298
+ dict_like_attrs = ("__getitem__", "keys", "__contains__")
299
+ return (
300
+ all(hasattr(obj, attr) for attr in dict_like_attrs)
301
+ # [GH 25196] exclude classes
302
+ and not isinstance(obj, type)
303
+ )
304
+
305
+
306
+ def is_named_tuple(obj) -> bool:
307
+ """
308
+ Check if the object is a named tuple.
309
+
310
+ Parameters
311
+ ----------
312
+ obj : The object to check
313
+
314
+ Returns
315
+ -------
316
+ bool
317
+ Whether `obj` is a named tuple.
318
+
319
+ Examples
320
+ --------
321
+ >>> from collections import namedtuple
322
+ >>> from pandas.api.types import is_named_tuple
323
+ >>> Point = namedtuple("Point", ["x", "y"])
324
+ >>> p = Point(1, 2)
325
+ >>>
326
+ >>> is_named_tuple(p)
327
+ True
328
+ >>> is_named_tuple((1, 2))
329
+ False
330
+ """
331
+ return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields")
332
+
333
+
334
+ def is_hashable(obj) -> TypeGuard[Hashable]:
335
+ """
336
+ Return True if hash(obj) will succeed, False otherwise.
337
+
338
+ Some types will pass a test against collections.abc.Hashable but fail when
339
+ they are actually hashed with hash().
340
+
341
+ Distinguish between these and other types by trying the call to hash() and
342
+ seeing if they raise TypeError.
343
+
344
+ Returns
345
+ -------
346
+ bool
347
+
348
+ Examples
349
+ --------
350
+ >>> import collections
351
+ >>> from pandas.api.types import is_hashable
352
+ >>> a = ([],)
353
+ >>> isinstance(a, collections.abc.Hashable)
354
+ True
355
+ >>> is_hashable(a)
356
+ False
357
+ """
358
+ # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable),
359
+ # which can be faster than calling hash. That is because numpy scalars
360
+ # fail this test.
361
+
362
+ # Reconsider this decision once this numpy bug is fixed:
363
+ # https://github.com/numpy/numpy/issues/5562
364
+
365
+ try:
366
+ hash(obj)
367
+ except TypeError:
368
+ return False
369
+ else:
370
+ return True
371
+
372
+
373
+ def is_sequence(obj) -> bool:
374
+ """
375
+ Check if the object is a sequence of objects.
376
+ String types are not included as sequences here.
377
+
378
+ Parameters
379
+ ----------
380
+ obj : The object to check
381
+
382
+ Returns
383
+ -------
384
+ is_sequence : bool
385
+ Whether `obj` is a sequence of objects.
386
+
387
+ Examples
388
+ --------
389
+ >>> l = [1, 2, 3]
390
+ >>>
391
+ >>> is_sequence(l)
392
+ True
393
+ >>> is_sequence(iter(l))
394
+ False
395
+ """
396
+ try:
397
+ iter(obj) # Can iterate over it.
398
+ len(obj) # Has a length associated with it.
399
+ return not isinstance(obj, (str, bytes))
400
+ except (TypeError, AttributeError):
401
+ return False
402
+
403
+
404
+ def is_dataclass(item) -> bool:
405
+ """
406
+ Checks if the object is a data-class instance
407
+
408
+ Parameters
409
+ ----------
410
+ item : object
411
+
412
+ Returns
413
+ -------
414
+ is_dataclass : bool
415
+ True if the item is an instance of a data-class;
416
+ False if you pass the data-class type itself.
417
+
418
+ Examples
419
+ --------
420
+ >>> from dataclasses import dataclass
421
+ >>> @dataclass
422
+ ... class Point:
423
+ ... x: int
424
+ ... y: int
425
+
426
+ >>> is_dataclass(Point)
427
+ False
428
+ >>> is_dataclass(Point(0,2))
429
+ True
430
+
431
+ """
432
+ try:
433
+ import dataclasses
434
+
435
+ return dataclasses.is_dataclass(item) and not isinstance(item, type)
436
+ except ImportError:
437
+ return False
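A short sketch of the inference helpers above that are re-exported under pandas.api.types, assuming pandas is installed:

import io
from pandas.api.types import is_dict_like, is_file_like, is_hashable, is_re_compilable

is_dict_like({"a": 1})            # True
is_dict_like(dict)                # False: classes are excluded (GH 25196)
is_hashable(([],))                # False: passes the Hashable ABC but hash() raises TypeError
is_file_like(io.StringIO("x"))    # True: has read/write and __iter__
is_re_compilable(".*")            # True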
venv/lib/python3.10/site-packages/pandas/core/dtypes/missing.py ADDED
@@ -0,0 +1,810 @@
1
+ """
2
+ missing types & inference
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from decimal import Decimal
7
+ from functools import partial
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ overload,
11
+ )
12
+ import warnings
13
+
14
+ import numpy as np
15
+
16
+ from pandas._config import get_option
17
+
18
+ from pandas._libs import lib
19
+ import pandas._libs.missing as libmissing
20
+ from pandas._libs.tslibs import (
21
+ NaT,
22
+ iNaT,
23
+ )
24
+
25
+ from pandas.core.dtypes.common import (
26
+ DT64NS_DTYPE,
27
+ TD64NS_DTYPE,
28
+ ensure_object,
29
+ is_scalar,
30
+ is_string_or_object_np_dtype,
31
+ )
32
+ from pandas.core.dtypes.dtypes import (
33
+ CategoricalDtype,
34
+ DatetimeTZDtype,
35
+ ExtensionDtype,
36
+ IntervalDtype,
37
+ PeriodDtype,
38
+ )
39
+ from pandas.core.dtypes.generic import (
40
+ ABCDataFrame,
41
+ ABCExtensionArray,
42
+ ABCIndex,
43
+ ABCMultiIndex,
44
+ ABCSeries,
45
+ )
46
+ from pandas.core.dtypes.inference import is_list_like
47
+
48
+ if TYPE_CHECKING:
49
+ from re import Pattern
50
+
51
+ from pandas._typing import (
52
+ ArrayLike,
53
+ DtypeObj,
54
+ NDFrame,
55
+ NDFrameT,
56
+ Scalar,
57
+ npt,
58
+ )
59
+
60
+ from pandas import Series
61
+ from pandas.core.indexes.base import Index
62
+
63
+
64
+ isposinf_scalar = libmissing.isposinf_scalar
65
+ isneginf_scalar = libmissing.isneginf_scalar
66
+
67
+ nan_checker = np.isnan
68
+ INF_AS_NA = False
69
+ _dtype_object = np.dtype("object")
70
+ _dtype_str = np.dtype(str)
71
+
72
+
73
+ @overload
74
+ def isna(obj: Scalar | Pattern) -> bool:
75
+ ...
76
+
77
+
78
+ @overload
79
+ def isna(
80
+ obj: ArrayLike | Index | list,
81
+ ) -> npt.NDArray[np.bool_]:
82
+ ...
83
+
84
+
85
+ @overload
86
+ def isna(obj: NDFrameT) -> NDFrameT:
87
+ ...
88
+
89
+
90
+ # handle unions
91
+ @overload
92
+ def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
93
+ ...
94
+
95
+
96
+ @overload
97
+ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
98
+ ...
99
+
100
+
101
+ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
102
+ """
103
+ Detect missing values for an array-like object.
104
+
105
+ This function takes a scalar or array-like object and indicates
106
+ whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
107
+ in object arrays, ``NaT`` in datetimelike).
108
+
109
+ Parameters
110
+ ----------
111
+ obj : scalar or array-like
112
+ Object to check for null or missing values.
113
+
114
+ Returns
115
+ -------
116
+ bool or array-like of bool
117
+ For scalar input, returns a scalar boolean.
118
+ For array input, returns an array of boolean indicating whether each
119
+ corresponding element is missing.
120
+
121
+ See Also
122
+ --------
123
+ notna : Boolean inverse of pandas.isna.
124
+ Series.isna : Detect missing values in a Series.
125
+ DataFrame.isna : Detect missing values in a DataFrame.
126
+ Index.isna : Detect missing values in an Index.
127
+
128
+ Examples
129
+ --------
130
+ Scalar arguments (including strings) result in a scalar boolean.
131
+
132
+ >>> pd.isna('dog')
133
+ False
134
+
135
+ >>> pd.isna(pd.NA)
136
+ True
137
+
138
+ >>> pd.isna(np.nan)
139
+ True
140
+
141
+ ndarrays result in an ndarray of booleans.
142
+
143
+ >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
144
+ >>> array
145
+ array([[ 1., nan, 3.],
146
+ [ 4., 5., nan]])
147
+ >>> pd.isna(array)
148
+ array([[False, True, False],
149
+ [False, False, True]])
150
+
151
+ For indexes, an ndarray of booleans is returned.
152
+
153
+ >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
154
+ ... "2017-07-08"])
155
+ >>> index
156
+ DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
157
+ dtype='datetime64[ns]', freq=None)
158
+ >>> pd.isna(index)
159
+ array([False, False, True, False])
160
+
161
+ For Series and DataFrame, the same type is returned, containing booleans.
162
+
163
+ >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
164
+ >>> df
165
+ 0 1 2
166
+ 0 ant bee cat
167
+ 1 dog None fly
168
+ >>> pd.isna(df)
169
+ 0 1 2
170
+ 0 False False False
171
+ 1 False True False
172
+
173
+ >>> pd.isna(df[1])
174
+ 0 False
175
+ 1 True
176
+ Name: 1, dtype: bool
177
+ """
178
+ return _isna(obj)
179
+
180
+
181
+ isnull = isna
182
+
183
+
184
+ def _isna(obj, inf_as_na: bool = False):
185
+ """
186
+ Detect missing values, treating None, NaN or NA as null. Infinite
187
+ values will also be treated as null if inf_as_na is True.
188
+
189
+ Parameters
190
+ ----------
191
+ obj: ndarray or object value
192
+ Input array or scalar value.
193
+ inf_as_na: bool
194
+ Whether to treat infinity as null.
195
+
196
+ Returns
197
+ -------
198
+ boolean ndarray or boolean
199
+ """
200
+ if is_scalar(obj):
201
+ return libmissing.checknull(obj, inf_as_na=inf_as_na)
202
+ elif isinstance(obj, ABCMultiIndex):
203
+ raise NotImplementedError("isna is not defined for MultiIndex")
204
+ elif isinstance(obj, type):
205
+ return False
206
+ elif isinstance(obj, (np.ndarray, ABCExtensionArray)):
207
+ return _isna_array(obj, inf_as_na=inf_as_na)
208
+ elif isinstance(obj, ABCIndex):
209
+ # Try to use cached isna, which also short-circuits for integer dtypes
210
+ # and avoids materializing RangeIndex._values
211
+ if not obj._can_hold_na:
212
+ return obj.isna()
213
+ return _isna_array(obj._values, inf_as_na=inf_as_na)
214
+
215
+ elif isinstance(obj, ABCSeries):
216
+ result = _isna_array(obj._values, inf_as_na=inf_as_na)
217
+ # box
218
+ result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
219
+ return result
220
+ elif isinstance(obj, ABCDataFrame):
221
+ return obj.isna()
222
+ elif isinstance(obj, list):
223
+ return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
224
+ elif hasattr(obj, "__array__"):
225
+ return _isna_array(np.asarray(obj), inf_as_na=inf_as_na)
226
+ else:
227
+ return False
228
+
229
+
230
+ def _use_inf_as_na(key) -> None:
231
+ """
232
+ Option change callback for na/inf behaviour.
233
+
234
+ Choose which replacement for numpy.isnan / -numpy.isfinite is used.
235
+
236
+ Parameters
237
+ ----------
238
+ key : str
239
+ The option key. When its value is True, treat None, NaN, INF, -INF
240
+ as null (old way); when False, None and NaN are null but INF, -INF
241
+ are not (new way).
242
+
243
+ Notes
244
+ -----
245
+ This approach to setting global module values is discussed and
246
+ approved here:
247
+
248
+ * https://stackoverflow.com/questions/4859217/
249
+ programmatically-creating-variables-in-python/4859312#4859312
250
+ """
251
+ inf_as_na = get_option(key)
252
+ globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)
253
+ if inf_as_na:
254
+ globals()["nan_checker"] = lambda x: ~np.isfinite(x)
255
+ globals()["INF_AS_NA"] = True
256
+ else:
257
+ globals()["nan_checker"] = np.isnan
258
+ globals()["INF_AS_NA"] = False
259
+
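The callback above rebinds module-level globals so hot code paths read a plain global instead of re-querying the option on every call. A minimal standalone sketch of that pattern (illustrative only; on_option_change and _nan_checker are made-up names, not pandas API):

import numpy as np

_nan_checker = np.isnan  # module global, rebound by the callback below

def on_option_change(inf_as_na: bool) -> None:
    # Swap the checker once per option change instead of branching per call.
    global _nan_checker
    _nan_checker = (lambda x: ~np.isfinite(x)) if inf_as_na else np.isnan

on_option_change(True)
print(_nan_checker(np.array([1.0, np.inf, np.nan])))  # [False  True  True]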
260
+
261
+ def _isna_array(values: ArrayLike, inf_as_na: bool = False):
262
+ """
263
+ Return an array indicating which values of the input array are NaN / NA.
264
+
265
+ Parameters
266
+ ----------
267
+ values: ndarray or ExtensionArray
268
+ The input array whose elements are to be checked.
269
+ inf_as_na: bool
270
+ Whether or not to treat infinite values as NA.
271
+
272
+ Returns
273
+ -------
274
+ array-like
275
+ Array of boolean values denoting the NA status of each element.
276
+ """
277
+ dtype = values.dtype
278
+
279
+ if not isinstance(values, np.ndarray):
280
+ # i.e. ExtensionArray
281
+ if inf_as_na and isinstance(dtype, CategoricalDtype):
282
+ result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na)
283
+ else:
284
+ # error: Incompatible types in assignment (expression has type
285
+ # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has
286
+ # type "ndarray[Any, dtype[bool_]]")
287
+ result = values.isna() # type: ignore[assignment]
288
+ elif isinstance(values, np.rec.recarray):
289
+ # GH 48526
290
+ result = _isna_recarray_dtype(values, inf_as_na=inf_as_na)
291
+ elif is_string_or_object_np_dtype(values.dtype):
292
+ result = _isna_string_dtype(values, inf_as_na=inf_as_na)
293
+ elif dtype.kind in "mM":
294
+ # this is the NaT pattern
295
+ result = values.view("i8") == iNaT
296
+ else:
297
+ if inf_as_na:
298
+ result = ~np.isfinite(values)
299
+ else:
300
+ result = np.isnan(values)
301
+
302
+ return result
303
+
304
+
305
+ def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]:
306
+ # Working around NumPy ticket 1542
307
+ dtype = values.dtype
308
+
309
+ if dtype.kind in ("S", "U"):
310
+ result = np.zeros(values.shape, dtype=bool)
311
+ else:
312
+ if values.ndim in {1, 2}:
313
+ result = libmissing.isnaobj(values, inf_as_na=inf_as_na)
314
+ else:
315
+ # 0-D, reached via e.g. mask_missing
316
+ result = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na)
317
+ result = result.reshape(values.shape)
318
+
319
+ return result
320
+
321
+
322
+ def _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_:
323
+ is_inf_in_record = np.zeros(len(record_as_array), dtype=bool)
324
+ for i, value in enumerate(record_as_array):
325
+ is_element_inf = False
326
+ try:
327
+ is_element_inf = np.isinf(value)
328
+ except TypeError:
329
+ is_element_inf = False
330
+ is_inf_in_record[i] = is_element_inf
331
+
332
+ return np.any(is_inf_in_record)
333
+
334
+
335
+ def _isna_recarray_dtype(
336
+ values: np.rec.recarray, inf_as_na: bool
337
+ ) -> npt.NDArray[np.bool_]:
338
+ result = np.zeros(values.shape, dtype=bool)
339
+ for i, record in enumerate(values):
340
+ record_as_array = np.array(record.tolist())
341
+ does_record_contain_nan = isna_all(record_as_array)
342
+ does_record_contain_inf = False
343
+ if inf_as_na:
344
+ does_record_contain_inf = bool(_has_record_inf_value(record_as_array))
345
+ result[i] = np.any(
346
+ np.logical_or(does_record_contain_nan, does_record_contain_inf)
347
+ )
348
+
349
+ return result
350
+
351
+
352
+ @overload
353
+ def notna(obj: Scalar) -> bool:
354
+ ...
355
+
356
+
357
+ @overload
358
+ def notna(
359
+ obj: ArrayLike | Index | list,
360
+ ) -> npt.NDArray[np.bool_]:
361
+ ...
362
+
363
+
364
+ @overload
365
+ def notna(obj: NDFrameT) -> NDFrameT:
366
+ ...
367
+
368
+
369
+ # handle unions
370
+ @overload
371
+ def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
372
+ ...
373
+
374
+
375
+ @overload
376
+ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
377
+ ...
378
+
379
+
380
+ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
381
+ """
382
+ Detect non-missing values for an array-like object.
383
+
384
+ This function takes a scalar or array-like object and indicates
385
+ whether values are valid (not missing, which is ``NaN`` in numeric
386
+ arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).
387
+
388
+ Parameters
389
+ ----------
390
+ obj : array-like or object value
391
+ Object to check for *not* null or *non*-missing values.
392
+
393
+ Returns
394
+ -------
395
+ bool or array-like of bool
396
+ For scalar input, returns a scalar boolean.
397
+ For array input, returns an array of boolean indicating whether each
398
+ corresponding element is valid.
399
+
400
+ See Also
401
+ --------
402
+ isna : Boolean inverse of pandas.notna.
403
+ Series.notna : Detect valid values in a Series.
404
+ DataFrame.notna : Detect valid values in a DataFrame.
405
+ Index.notna : Detect valid values in an Index.
406
+
407
+ Examples
408
+ --------
409
+ Scalar arguments (including strings) result in a scalar boolean.
410
+
411
+ >>> pd.notna('dog')
412
+ True
413
+
414
+ >>> pd.notna(pd.NA)
415
+ False
416
+
417
+ >>> pd.notna(np.nan)
418
+ False
419
+
420
+ ndarrays result in an ndarray of booleans.
421
+
422
+ >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
423
+ >>> array
424
+ array([[ 1., nan, 3.],
425
+ [ 4., 5., nan]])
426
+ >>> pd.notna(array)
427
+ array([[ True, False, True],
428
+ [ True, True, False]])
429
+
430
+ For indexes, an ndarray of booleans is returned.
431
+
432
+ >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
433
+ ... "2017-07-08"])
434
+ >>> index
435
+ DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
436
+ dtype='datetime64[ns]', freq=None)
437
+ >>> pd.notna(index)
438
+ array([ True, True, False, True])
439
+
440
+ For Series and DataFrame, the same type is returned, containing booleans.
441
+
442
+ >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
443
+ >>> df
444
+ 0 1 2
445
+ 0 ant bee cat
446
+ 1 dog None fly
447
+ >>> pd.notna(df)
448
+ 0 1 2
449
+ 0 True True True
450
+ 1 True False True
451
+
452
+ >>> pd.notna(df[1])
453
+ 0 True
454
+ 1 False
455
+ Name: 1, dtype: bool
456
+ """
457
+ res = isna(obj)
458
+ if isinstance(res, bool):
459
+ return not res
460
+ return ~res
461
+
462
+
463
+ notnull = notna
464
+
465
+
466
+ def array_equivalent(
467
+ left,
468
+ right,
469
+ strict_nan: bool = False,
470
+ dtype_equal: bool = False,
471
+ ) -> bool:
472
+ """
473
+ True if two arrays, left and right, have equal non-NaN elements, and NaNs
474
+ in corresponding locations. False otherwise. It is assumed that left and
475
+ right are NumPy arrays of the same dtype. The behavior of this function
476
+ (particularly with respect to NaNs) is not defined if the dtypes are
477
+ different.
478
+
479
+ Parameters
480
+ ----------
481
+ left, right : ndarrays
482
+ strict_nan : bool, default False
483
+ If True, consider NaN and None to be different.
484
+ dtype_equal : bool, default False
485
+ Whether `left` and `right` are known to have the same dtype
486
+ according to `is_dtype_equal`. Some methods like `BlockManager.equals`.
487
+ require that the dtypes match. Setting this to ``True`` can improve
488
+ performance, but will give different results for arrays that are
489
+ equal but different dtypes.
490
+
491
+ Returns
492
+ -------
493
+ b : bool
494
+ Returns True if the arrays are equivalent.
495
+
496
+ Examples
497
+ --------
498
+ >>> array_equivalent(
499
+ ... np.array([1, 2, np.nan]),
500
+ ... np.array([1, 2, np.nan]))
501
+ True
502
+ >>> array_equivalent(
503
+ ... np.array([1, np.nan, 2]),
504
+ ... np.array([1, 2, np.nan]))
505
+ False
506
+ """
507
+ left, right = np.asarray(left), np.asarray(right)
508
+
509
+ # shape compat
510
+ if left.shape != right.shape:
511
+ return False
512
+
513
+ if dtype_equal:
514
+ # fastpath when we require that the dtypes match (Block.equals)
515
+ if left.dtype.kind in "fc":
516
+ return _array_equivalent_float(left, right)
517
+ elif left.dtype.kind in "mM":
518
+ return _array_equivalent_datetimelike(left, right)
519
+ elif is_string_or_object_np_dtype(left.dtype):
520
+ # TODO: fastpath for pandas' StringDtype
521
+ return _array_equivalent_object(left, right, strict_nan)
522
+ else:
523
+ return np.array_equal(left, right)
524
+
525
+ # Slow path when we allow comparing different dtypes.
526
+ # Object arrays can contain None, NaN and NaT.
527
+ # string dtypes must come to this path for NumPy 1.7.1 compat
528
+ if left.dtype.kind in "OSU" or right.dtype.kind in "OSU":
529
+ # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]`
530
+ # or `in ("O", "S", "U")`
531
+ return _array_equivalent_object(left, right, strict_nan)
532
+
533
+ # NaNs can occur in float and complex arrays.
534
+ if left.dtype.kind in "fc":
535
+ if not (left.size and right.size):
536
+ return True
537
+ return ((left == right) | (isna(left) & isna(right))).all()
538
+
539
+ elif left.dtype.kind in "mM" or right.dtype.kind in "mM":
540
+ # datetime64, timedelta64, Period
541
+ if left.dtype != right.dtype:
542
+ return False
543
+
544
+ left = left.view("i8")
545
+ right = right.view("i8")
546
+
547
+ # if we have structured dtypes, compare first
548
+ if (
549
+ left.dtype.type is np.void or right.dtype.type is np.void
550
+ ) and left.dtype != right.dtype:
551
+ return False
552
+
553
+ return np.array_equal(left, right)
554
+
555
+
556
+ def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool:
557
+ return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all())
558
+
559
+
560
+ def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray):
561
+ return np.array_equal(left.view("i8"), right.view("i8"))
562
+
563
+
564
+ def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool):
565
+ left = ensure_object(left)
566
+ right = ensure_object(right)
567
+
568
+ mask: npt.NDArray[np.bool_] | None = None
569
+ if strict_nan:
570
+ mask = isna(left) & isna(right)
571
+ if not mask.any():
572
+ mask = None
573
+
574
+ try:
575
+ if mask is None:
576
+ return lib.array_equivalent_object(left, right)
577
+ if not lib.array_equivalent_object(left[~mask], right[~mask]):
578
+ return False
579
+ left_remaining = left[mask]
580
+ right_remaining = right[mask]
581
+ except ValueError:
582
+ # can raise a ValueError if left and right cannot be
583
+ # compared (e.g. nested arrays)
584
+ left_remaining = left
585
+ right_remaining = right
586
+
587
+ for left_value, right_value in zip(left_remaining, right_remaining):
588
+ if left_value is NaT and right_value is not NaT:
589
+ return False
590
+
591
+ elif left_value is libmissing.NA and right_value is not libmissing.NA:
592
+ return False
593
+
594
+ elif isinstance(left_value, float) and np.isnan(left_value):
595
+ if not isinstance(right_value, float) or not np.isnan(right_value):
596
+ return False
597
+ else:
598
+ with warnings.catch_warnings():
599
+ # suppress numpy's "elementwise comparison failed"
600
+ warnings.simplefilter("ignore", DeprecationWarning)
601
+ try:
602
+ if np.any(np.asarray(left_value != right_value)):
603
+ return False
604
+ except TypeError as err:
605
+ if "boolean value of NA is ambiguous" in str(err):
606
+ return False
607
+ raise
608
+ except ValueError:
609
+ # numpy can raise a ValueError if left and right cannot be
610
+ # compared (e.g. nested arrays)
611
+ return False
612
+ return True
613
+
614
+
615
+ def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
616
+ """
617
+ ExtensionArray-compatible implementation of array_equivalent.
618
+ """
619
+ if left.dtype != right.dtype:
620
+ return False
621
+ elif isinstance(left, ABCExtensionArray):
622
+ return left.equals(right)
623
+ else:
624
+ return array_equivalent(left, right, dtype_equal=True)
625
+
626
+
627
+ def infer_fill_value(val):
628
+ """
629
+ Infer the fill value for the nan/NaT from the provided
630
+ scalar/ndarray/list-like. If we are a NaT, return the correctly
631
+ dtyped element to provide proper block construction.
632
+ """
633
+ if not is_list_like(val):
634
+ val = [val]
635
+ val = np.asarray(val)
636
+ if val.dtype.kind in "mM":
637
+ return np.array("NaT", dtype=val.dtype)
638
+ elif val.dtype == object:
639
+ dtype = lib.infer_dtype(ensure_object(val), skipna=False)
640
+ if dtype in ["datetime", "datetime64"]:
641
+ return np.array("NaT", dtype=DT64NS_DTYPE)
642
+ elif dtype in ["timedelta", "timedelta64"]:
643
+ return np.array("NaT", dtype=TD64NS_DTYPE)
644
+ return np.array(np.nan, dtype=object)
645
+ elif val.dtype.kind == "U":
646
+ return np.array(np.nan, dtype=val.dtype)
647
+ return np.nan
648
+
649
+
650
+ def construct_1d_array_from_inferred_fill_value(
651
+ value: object, length: int
652
+ ) -> ArrayLike:
653
+ # Find our empty_value dtype by constructing an array
654
+ # from our value and doing a .take on it
655
+ from pandas.core.algorithms import take_nd
656
+ from pandas.core.construction import sanitize_array
657
+ from pandas.core.indexes.base import Index
658
+
659
+ arr = sanitize_array(value, Index(range(1)), copy=False)
660
+ taker = -1 * np.ones(length, dtype=np.intp)
661
+ return take_nd(arr, taker)
662
+
663
+
664
+ def maybe_fill(arr: np.ndarray) -> np.ndarray:
665
+ """
666
+ Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype.
667
+ """
668
+ if arr.dtype.kind not in "iub":
669
+ arr.fill(np.nan)
670
+ return arr
671
+
672
+
673
+ def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
674
+ """
675
+ Return a dtype compat na value
676
+
677
+ Parameters
678
+ ----------
679
+ dtype : string / dtype
680
+ compat : bool, default True
681
+
682
+ Returns
683
+ -------
684
+ np.dtype or a pandas dtype
685
+
686
+ Examples
687
+ --------
688
+ >>> na_value_for_dtype(np.dtype('int64'))
689
+ 0
690
+ >>> na_value_for_dtype(np.dtype('int64'), compat=False)
691
+ nan
692
+ >>> na_value_for_dtype(np.dtype('float64'))
693
+ nan
694
+ >>> na_value_for_dtype(np.dtype('bool'))
695
+ False
696
+ >>> na_value_for_dtype(np.dtype('datetime64[ns]'))
697
+ numpy.datetime64('NaT')
698
+ """
699
+
700
+ if isinstance(dtype, ExtensionDtype):
701
+ return dtype.na_value
702
+ elif dtype.kind in "mM":
703
+ unit = np.datetime_data(dtype)[0]
704
+ return dtype.type("NaT", unit)
705
+ elif dtype.kind == "f":
706
+ return np.nan
707
+ elif dtype.kind in "iu":
708
+ if compat:
709
+ return 0
710
+ return np.nan
711
+ elif dtype.kind == "b":
712
+ if compat:
713
+ return False
714
+ return np.nan
715
+ return np.nan
716
+
717
+
718
+ def remove_na_arraylike(arr: Series | Index | np.ndarray):
719
+ """
720
+ Return array-like containing only true/non-NaN values, possibly empty.
721
+ """
722
+ if isinstance(arr.dtype, ExtensionDtype):
723
+ return arr[notna(arr)]
724
+ else:
725
+ return arr[notna(np.asarray(arr))]
726
+
727
+
728
+ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
729
+ """
730
+ isna check that excludes incompatible dtypes
731
+
732
+ Parameters
733
+ ----------
734
+ obj : object
735
+ dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype
736
+
737
+ Returns
738
+ -------
739
+ bool
740
+ """
741
+ if not lib.is_scalar(obj) or not isna(obj):
742
+ return False
743
+ elif dtype.kind == "M":
744
+ if isinstance(dtype, np.dtype):
745
+ # i.e. not tzaware
746
+ return not isinstance(obj, (np.timedelta64, Decimal))
747
+ # we have to rule out tznaive dt64("NaT")
748
+ return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal))
749
+ elif dtype.kind == "m":
750
+ return not isinstance(obj, (np.datetime64, Decimal))
751
+ elif dtype.kind in "iufc":
752
+ # Numeric
753
+ return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))
754
+ elif dtype.kind == "b":
755
+ # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype)
756
+ return lib.is_float(obj) or obj is None or obj is libmissing.NA
757
+
758
+ elif dtype == _dtype_str:
759
+ # numpy string dtypes to avoid float np.nan
760
+ return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float))
761
+
762
+ elif dtype == _dtype_object:
763
+ # This is needed for Categorical, but is kind of weird
764
+ return True
765
+
766
+ elif isinstance(dtype, PeriodDtype):
767
+ return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
768
+
769
+ elif isinstance(dtype, IntervalDtype):
770
+ return lib.is_float(obj) or obj is None or obj is libmissing.NA
771
+
772
+ elif isinstance(dtype, CategoricalDtype):
773
+ return is_valid_na_for_dtype(obj, dtype.categories.dtype)
774
+
775
+ # fallback, default to allowing NaN, None, NA, NaT
776
+ return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
777
+
778
+
779
+ def isna_all(arr: ArrayLike) -> bool:
780
+ """
781
+ Optimized equivalent to isna(arr).all()
782
+ """
783
+ total_len = len(arr)
784
+
785
+ # Usually it's enough to check but a small fraction of values to see if
786
+ # a block is NOT null, chunks should help in such cases.
787
+ # parameters 1000 and 40 were chosen arbitrarily
788
+ chunk_len = max(total_len // 40, 1000)
789
+
790
+ dtype = arr.dtype
791
+ if lib.is_np_dtype(dtype, "f"):
792
+ checker = nan_checker
793
+
794
+ elif (lib.is_np_dtype(dtype, "mM")) or isinstance(
795
+ dtype, (DatetimeTZDtype, PeriodDtype)
796
+ ):
797
+ # error: Incompatible types in assignment (expression has type
798
+ # "Callable[[Any], Any]", variable has type "ufunc")
799
+ checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment]
800
+
801
+ else:
802
+ # error: Incompatible types in assignment (expression has type "Callable[[Any],
803
+ # Any]", variable has type "ufunc")
804
+ checker = lambda x: _isna_array( # type: ignore[assignment]
805
+ x, inf_as_na=INF_AS_NA
806
+ )
807
+
808
+ return all(
809
+ checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)
810
+ )
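isna_all above checks the array in chunks so a non-missing value near the start lets it stop early instead of evaluating the NA check over the whole block. A minimal standalone sketch of the same idea for a float ndarray (illustrative only, not the pandas implementation; the chunk size is arbitrary here as it is above):

import numpy as np

def all_nan_chunked(arr: np.ndarray, chunk_len: int = 1000) -> bool:
    # Scan chunk by chunk; the first chunk containing a non-NaN value
    # short-circuits the scan, mirroring the generator expression above.
    for start in range(0, len(arr), chunk_len):
        if not np.isnan(arr[start : start + chunk_len]).all():
            return False
    return True

print(all_nan_chunked(np.array([np.nan, 1.0, np.nan])))  # False
print(all_nan_chunked(np.full(5000, np.nan)))            # True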
venv/lib/python3.10/site-packages/pandas/core/groupby/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ from pandas.core.groupby.generic import (
2
+ DataFrameGroupBy,
3
+ NamedAgg,
4
+ SeriesGroupBy,
5
+ )
6
+ from pandas.core.groupby.groupby import GroupBy
7
+ from pandas.core.groupby.grouper import Grouper
8
+
9
+ __all__ = [
10
+ "DataFrameGroupBy",
11
+ "NamedAgg",
12
+ "SeriesGroupBy",
13
+ "GroupBy",
14
+ "Grouper",
15
+ ]
venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (462 Bytes). View file
 
venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc ADDED
Binary file (2.3 kB). View file
 
venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc ADDED
Binary file (159 kB). View file
 
venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc ADDED
Binary file (26.3 kB). View file
 
venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc ADDED
Binary file (9.75 kB). View file
 
venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc ADDED
Binary file (4.52 kB). View file
 
venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc ADDED
Binary file (29 kB). View file
 
venv/lib/python3.10/site-packages/pandas/core/groupby/base.py ADDED
@@ -0,0 +1,121 @@
1
+ """
2
+ Provide basic components for groupby.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import dataclasses
7
+ from typing import TYPE_CHECKING
8
+
9
+ if TYPE_CHECKING:
10
+ from collections.abc import Hashable
11
+
12
+
13
+ @dataclasses.dataclass(order=True, frozen=True)
14
+ class OutputKey:
15
+ label: Hashable
16
+ position: int
17
+
18
+
19
+ # special case to prevent duplicate plots when catching exceptions when
20
+ # forwarding methods from NDFrames
21
+ plotting_methods = frozenset(["plot", "hist"])
22
+
23
+ # cythonized transformations or canned "agg+broadcast", which do not
24
+ # require postprocessing of the result by transform.
25
+ cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
26
+
27
+ # List of aggregation/reduction functions.
28
+ # These map each group to a single numeric value
29
+ reduction_kernels = frozenset(
30
+ [
31
+ "all",
32
+ "any",
33
+ "corrwith",
34
+ "count",
35
+ "first",
36
+ "idxmax",
37
+ "idxmin",
38
+ "last",
39
+ "max",
40
+ "mean",
41
+ "median",
42
+ "min",
43
+ "nunique",
44
+ "prod",
45
+ # as long as `quantile`'s signature accepts only
46
+ # a single quantile value, it's a reduction.
47
+ # GH#27526 might change that.
48
+ "quantile",
49
+ "sem",
50
+ "size",
51
+ "skew",
52
+ "std",
53
+ "sum",
54
+ "var",
55
+ ]
56
+ )
57
+
58
+ # List of transformation functions.
59
+ # a transformation is a function that, for each group,
60
+ # produces a result that has the same shape as the group.
61
+
62
+
63
+ transformation_kernels = frozenset(
64
+ [
65
+ "bfill",
66
+ "cumcount",
67
+ "cummax",
68
+ "cummin",
69
+ "cumprod",
70
+ "cumsum",
71
+ "diff",
72
+ "ffill",
73
+ "fillna",
74
+ "ngroup",
75
+ "pct_change",
76
+ "rank",
77
+ "shift",
78
+ ]
79
+ )
80
+
81
+ # these are all the public methods on Grouper which don't belong
82
+ # in either of the above lists
83
+ groupby_other_methods = frozenset(
84
+ [
85
+ "agg",
86
+ "aggregate",
87
+ "apply",
88
+ "boxplot",
89
+ # corr and cov return ngroups*ncolumns rows, so they
90
+ # are neither a transformation nor a reduction
91
+ "corr",
92
+ "cov",
93
+ "describe",
94
+ "dtypes",
95
+ "expanding",
96
+ "ewm",
97
+ "filter",
98
+ "get_group",
99
+ "groups",
100
+ "head",
101
+ "hist",
102
+ "indices",
103
+ "ndim",
104
+ "ngroups",
105
+ "nth",
106
+ "ohlc",
107
+ "pipe",
108
+ "plot",
109
+ "resample",
110
+ "rolling",
111
+ "tail",
112
+ "take",
113
+ "transform",
114
+ "sample",
115
+ "value_counts",
116
+ ]
117
+ )
118
+ # Valid values of `name` for `groupby.transform(name)`
119
+ # NOTE: do NOT edit this directly. New additions should be inserted
120
+ # into the appropriate list above.
121
+ transform_kernel_allowlist = reduction_kernels | transformation_kernels
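The two kernel sets above encode the reduction/transformation split that groupby.transform validates against. A short illustration with the public API (a sketch; the point is the output shape):

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})

# A reduction kernel maps each group to a single value ...
print(df.groupby("key")["val"].sum())             # a -> 3, b -> 3
# ... while the same name routed through transform() broadcasts the
# per-group result back to the original shape, one row per input row.
print(df.groupby("key")["val"].transform("sum"))  # 3, 3, 3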
venv/lib/python3.10/site-packages/pandas/core/groupby/categorical.py ADDED
@@ -0,0 +1,87 @@
1
+ from __future__ import annotations
2
+
3
+ import numpy as np
4
+
5
+ from pandas.core.algorithms import unique1d
6
+ from pandas.core.arrays.categorical import (
7
+ Categorical,
8
+ CategoricalDtype,
9
+ recode_for_categories,
10
+ )
11
+
12
+
13
+ def recode_for_groupby(
14
+ c: Categorical, sort: bool, observed: bool
15
+ ) -> tuple[Categorical, Categorical | None]:
16
+ """
17
+ Code the categories to ensure we can groupby for categoricals.
18
+
19
+ If observed=True, we return a new Categorical with the observed
20
+ categories only.
21
+
22
+ If sort=False, return a copy of self, coded with categories as
23
+ returned by .unique(), followed by any categories not appearing in
24
+ the data. If sort=True, return self.
25
+
26
+ This method is needed solely to ensure the categorical index of the
27
+ GroupBy result has categories in the order of appearance in the data
28
+ (GH-8868).
29
+
30
+ Parameters
31
+ ----------
32
+ c : Categorical
33
+ sort : bool
34
+ The value of the sort parameter groupby was called with.
35
+ observed : bool
36
+ Account only for the observed values
37
+
38
+ Returns
39
+ -------
40
+ Categorical
41
+ If sort=False, the new categories are set to the order of
42
+ appearance in codes (unless ordered=True, in which case the
43
+ original order is preserved), followed by any unrepresented
44
+ categories in the original order.
45
+ Categorical or None
46
+ If we are observed, return the original categorical, otherwise None
47
+ """
48
+ # we only care about observed values
49
+ if observed:
50
+ # In cases with c.ordered, this is equivalent to
51
+ # return c.remove_unused_categories(), c
52
+
53
+ unique_codes = unique1d(c.codes)
54
+
55
+ take_codes = unique_codes[unique_codes != -1]
56
+ if sort:
57
+ take_codes = np.sort(take_codes)
58
+
59
+ # we recode according to the uniques
60
+ categories = c.categories.take(take_codes)
61
+ codes = recode_for_categories(c.codes, c.categories, categories)
62
+
63
+ # return a new categorical that maps our new codes
64
+ # and categories
65
+ dtype = CategoricalDtype(categories, ordered=c.ordered)
66
+ return Categorical._simple_new(codes, dtype=dtype), c
67
+
68
+ # Already sorted according to c.categories; all is fine
69
+ if sort:
70
+ return c, None
71
+
72
+ # sort=False should order groups in as-encountered order (GH-8868)
73
+
74
+ # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories
75
+ all_codes = np.arange(c.categories.nunique())
76
+ # GH 38140: exclude nan from indexer for categories
77
+ unique_notnan_codes = unique1d(c.codes[c.codes != -1])
78
+ if sort:
79
+ unique_notnan_codes = np.sort(unique_notnan_codes)
80
+ if len(all_codes) > len(unique_notnan_codes):
81
+ # GH 13179: All categories need to be present, even if missing from the data
82
+ missing_codes = np.setdiff1d(all_codes, unique_notnan_codes, assume_unique=True)
83
+ take_codes = np.concatenate((unique_notnan_codes, missing_codes))
84
+ else:
85
+ take_codes = unique_notnan_codes
86
+
87
+ return Categorical(c, c.unique().categories.take(take_codes)), None
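recode_for_groupby above is what makes the observed flag matter for categorical group keys. A short illustration with the public groupby API (values chosen only for clarity):

import pandas as pd

cat = pd.Categorical(["b", "a", "b"], categories=["a", "b", "c"])
df = pd.DataFrame({"key": cat, "val": [1, 2, 3]})

# observed=True: only categories actually present in the data appear.
print(df.groupby("key", observed=True)["val"].sum())   # a, b
# observed=False: all categories are kept, including the unused "c".
print(df.groupby("key", observed=False)["val"].sum())  # a, b, c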
venv/lib/python3.10/site-packages/pandas/core/groupby/generic.py ADDED
@@ -0,0 +1,2852 @@
1
+ """
2
+ Define the SeriesGroupBy and DataFrameGroupBy
3
+ classes that hold the groupby interfaces (and some implementations).
4
+
5
+ These are user facing as the result of the ``df.groupby(...)`` operations,
6
+ which here returns a DataFrameGroupBy object.
7
+ """
8
+ from __future__ import annotations
9
+
10
+ from collections import abc
11
+ from functools import partial
12
+ from textwrap import dedent
13
+ from typing import (
14
+ TYPE_CHECKING,
15
+ Any,
16
+ Callable,
17
+ Literal,
18
+ NamedTuple,
19
+ TypeVar,
20
+ Union,
21
+ cast,
22
+ )
23
+ import warnings
24
+
25
+ import numpy as np
26
+
27
+ from pandas._libs import (
28
+ Interval,
29
+ lib,
30
+ )
31
+ from pandas._libs.hashtable import duplicated
32
+ from pandas.errors import SpecificationError
33
+ from pandas.util._decorators import (
34
+ Appender,
35
+ Substitution,
36
+ doc,
37
+ )
38
+ from pandas.util._exceptions import find_stack_level
39
+
40
+ from pandas.core.dtypes.common import (
41
+ ensure_int64,
42
+ is_bool,
43
+ is_dict_like,
44
+ is_integer_dtype,
45
+ is_list_like,
46
+ is_numeric_dtype,
47
+ is_scalar,
48
+ )
49
+ from pandas.core.dtypes.dtypes import (
50
+ CategoricalDtype,
51
+ IntervalDtype,
52
+ )
53
+ from pandas.core.dtypes.inference import is_hashable
54
+ from pandas.core.dtypes.missing import (
55
+ isna,
56
+ notna,
57
+ )
58
+
59
+ from pandas.core import algorithms
60
+ from pandas.core.apply import (
61
+ GroupByApply,
62
+ maybe_mangle_lambdas,
63
+ reconstruct_func,
64
+ validate_func_kwargs,
65
+ warn_alias_replacement,
66
+ )
67
+ import pandas.core.common as com
68
+ from pandas.core.frame import DataFrame
69
+ from pandas.core.groupby import (
70
+ base,
71
+ ops,
72
+ )
73
+ from pandas.core.groupby.groupby import (
74
+ GroupBy,
75
+ GroupByPlot,
76
+ _agg_template_frame,
77
+ _agg_template_series,
78
+ _apply_docs,
79
+ _transform_template,
80
+ )
81
+ from pandas.core.indexes.api import (
82
+ Index,
83
+ MultiIndex,
84
+ all_indexes_same,
85
+ default_index,
86
+ )
87
+ from pandas.core.series import Series
88
+ from pandas.core.sorting import get_group_index
89
+ from pandas.core.util.numba_ import maybe_use_numba
90
+
91
+ from pandas.plotting import boxplot_frame_groupby
92
+
93
+ if TYPE_CHECKING:
94
+ from collections.abc import (
95
+ Hashable,
96
+ Mapping,
97
+ Sequence,
98
+ )
99
+
100
+ from pandas._typing import (
101
+ ArrayLike,
102
+ Axis,
103
+ AxisInt,
104
+ CorrelationMethod,
105
+ FillnaOptions,
106
+ IndexLabel,
107
+ Manager,
108
+ Manager2D,
109
+ SingleManager,
110
+ TakeIndexer,
111
+ )
112
+
113
+ from pandas import Categorical
114
+ from pandas.core.generic import NDFrame
115
+
116
+ # TODO(typing) the return value on this callable should be any *scalar*.
117
+ AggScalar = Union[str, Callable[..., Any]]
118
+ # TODO: validate types on ScalarResult and move to _typing
119
+ # Blocked from using by https://github.com/python/mypy/issues/1484
120
+ # See note at _mangle_lambda_list
121
+ ScalarResult = TypeVar("ScalarResult")
122
+
123
+
124
+ class NamedAgg(NamedTuple):
125
+ """
126
+ Helper for column specific aggregation with control over output column names.
127
+
128
+ Subclass of typing.NamedTuple.
129
+
130
+ Parameters
131
+ ----------
132
+ column : Hashable
133
+ Column label in the DataFrame to apply aggfunc.
134
+ aggfunc : function or str
135
+ Function to apply to the provided column. If string, the name of a built-in
136
+ pandas function.
137
+
138
+ Examples
139
+ --------
140
+ >>> df = pd.DataFrame({"key": [1, 1, 2], "a": [-1, 0, 1], 1: [10, 11, 12]})
141
+ >>> agg_a = pd.NamedAgg(column="a", aggfunc="min")
142
+ >>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x))
143
+ >>> df.groupby("key").agg(result_a=agg_a, result_1=agg_1)
144
+ result_a result_1
145
+ key
146
+ 1 -1 10.5
147
+ 2 1 12.0
148
+ """
149
+
150
+ column: Hashable
151
+ aggfunc: AggScalar
152
+
153
+
154
+ class SeriesGroupBy(GroupBy[Series]):
155
+ def _wrap_agged_manager(self, mgr: Manager) -> Series:
156
+ out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes)
157
+ out._name = self.obj.name
158
+ return out
159
+
160
+ def _get_data_to_aggregate(
161
+ self, *, numeric_only: bool = False, name: str | None = None
162
+ ) -> SingleManager:
163
+ ser = self._obj_with_exclusions
164
+ single = ser._mgr
165
+ if numeric_only and not is_numeric_dtype(ser.dtype):
166
+ # GH#41291 match Series behavior
167
+ kwd_name = "numeric_only"
168
+ raise TypeError(
169
+ f"Cannot use {kwd_name}=True with "
170
+ f"{type(self).__name__}.{name} and non-numeric dtypes."
171
+ )
172
+ return single
173
+
174
+ _agg_examples_doc = dedent(
175
+ """
176
+ Examples
177
+ --------
178
+ >>> s = pd.Series([1, 2, 3, 4])
179
+
180
+ >>> s
181
+ 0 1
182
+ 1 2
183
+ 2 3
184
+ 3 4
185
+ dtype: int64
186
+
187
+ >>> s.groupby([1, 1, 2, 2]).min()
188
+ 1 1
189
+ 2 3
190
+ dtype: int64
191
+
192
+ >>> s.groupby([1, 1, 2, 2]).agg('min')
193
+ 1 1
194
+ 2 3
195
+ dtype: int64
196
+
197
+ >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
198
+ min max
199
+ 1 1 2
200
+ 2 3 4
201
+
202
+ The output column names can be controlled by passing
203
+ the desired column names and aggregations as keyword arguments.
204
+
205
+ >>> s.groupby([1, 1, 2, 2]).agg(
206
+ ... minimum='min',
207
+ ... maximum='max',
208
+ ... )
209
+ minimum maximum
210
+ 1 1 2
211
+ 2 3 4
212
+
213
+ .. versionchanged:: 1.3.0
214
+
215
+ The resulting dtype will reflect the return value of the aggregating function.
216
+
217
+ >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
218
+ 1 1.0
219
+ 2 3.0
220
+ dtype: float64
221
+ """
222
+ )
223
+
224
+ @Appender(
225
+ _apply_docs["template"].format(
226
+ input="series", examples=_apply_docs["series_examples"]
227
+ )
228
+ )
229
+ def apply(self, func, *args, **kwargs) -> Series:
230
+ return super().apply(func, *args, **kwargs)
231
+
232
+ @doc(_agg_template_series, examples=_agg_examples_doc, klass="Series")
233
+ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
234
+ relabeling = func is None
235
+ columns = None
236
+ if relabeling:
237
+ columns, func = validate_func_kwargs(kwargs)
238
+ kwargs = {}
239
+
240
+ if isinstance(func, str):
241
+ if maybe_use_numba(engine) and engine is not None:
242
+ # Not all agg functions support numba, only propagate numba kwargs
243
+ # if user asks for numba, and engine is not None
244
+ # (if engine is None, the called function will handle the case where
245
+ # numba is requested via the global option)
246
+ kwargs["engine"] = engine
247
+ if engine_kwargs is not None:
248
+ kwargs["engine_kwargs"] = engine_kwargs
249
+ return getattr(self, func)(*args, **kwargs)
250
+
251
+ elif isinstance(func, abc.Iterable):
252
+ # Catch instances of lists / tuples
253
+ # but not the class list / tuple itself.
254
+ func = maybe_mangle_lambdas(func)
255
+ kwargs["engine"] = engine
256
+ kwargs["engine_kwargs"] = engine_kwargs
257
+ ret = self._aggregate_multiple_funcs(func, *args, **kwargs)
258
+ if relabeling:
259
+ # columns is not narrowed by mypy from relabeling flag
260
+ assert columns is not None # for mypy
261
+ ret.columns = columns
262
+ if not self.as_index:
263
+ ret = ret.reset_index()
264
+ return ret
265
+
266
+ else:
267
+ cyfunc = com.get_cython_func(func)
268
+ if cyfunc and not args and not kwargs:
269
+ warn_alias_replacement(self, func, cyfunc)
270
+ return getattr(self, cyfunc)()
271
+
272
+ if maybe_use_numba(engine):
273
+ return self._aggregate_with_numba(
274
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
275
+ )
276
+
277
+ if self.ngroups == 0:
278
+ # e.g. test_evaluate_with_empty_groups without any groups to
279
+ # iterate over, we have no output on which to do dtype
280
+ # inference. We default to using the existing dtype.
281
+ # xref GH#51445
282
+ obj = self._obj_with_exclusions
283
+ return self.obj._constructor(
284
+ [],
285
+ name=self.obj.name,
286
+ index=self._grouper.result_index,
287
+ dtype=obj.dtype,
288
+ )
289
+
290
+ if self._grouper.nkeys > 1:
291
+ return self._python_agg_general(func, *args, **kwargs)
292
+
293
+ try:
294
+ return self._python_agg_general(func, *args, **kwargs)
295
+ except KeyError:
296
+ # KeyError raised in test_groupby.test_basic is bc the func does
297
+ # a dictionary lookup on group.name, but group name is not
298
+ # pinned in _python_agg_general, only in _aggregate_named
299
+ result = self._aggregate_named(func, *args, **kwargs)
300
+
301
+ warnings.warn(
302
+ "Pinning the groupby key to each group in "
303
+ f"{type(self).__name__}.agg is deprecated, and cases that "
304
+ "relied on it will raise in a future version. "
305
+ "If your operation requires utilizing the groupby keys, "
306
+ "iterate over the groupby object instead.",
307
+ FutureWarning,
308
+ stacklevel=find_stack_level(),
309
+ )
310
+
311
+ # result is a dict whose keys are the elements of result_index
312
+ result = Series(result, index=self._grouper.result_index)
313
+ result = self._wrap_aggregated_output(result)
314
+ return result
315
+
316
+ agg = aggregate
317
+
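aggregate above dispatches on the type of func: a string resolves to the named method, an iterable is routed through _aggregate_multiple_funcs and returns a DataFrame, and a plain callable falls through to the python aggregation path. A quick illustration with the public API:

import pandas as pd

s = pd.Series([1, 2, 3, 4])
g = s.groupby([1, 1, 2, 2])

print(g.agg("min"))              # string -> named method, Series result
print(g.agg(["min", "max"]))     # iterable -> DataFrame, one column per func
print(g.agg(lambda x: x.sum()))  # callable -> python agg path, Series result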
318
+ def _python_agg_general(self, func, *args, **kwargs):
319
+ orig_func = func
320
+ func = com.is_builtin_func(func)
321
+ if orig_func != func:
322
+ alias = com._builtin_table_alias[func]
323
+ warn_alias_replacement(self, orig_func, alias)
324
+ f = lambda x: func(x, *args, **kwargs)
325
+
326
+ obj = self._obj_with_exclusions
327
+ result = self._grouper.agg_series(obj, f)
328
+ res = obj._constructor(result, name=obj.name)
329
+ return self._wrap_aggregated_output(res)
330
+
331
+ def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
332
+ if isinstance(arg, dict):
333
+ if self.as_index:
334
+ # GH 15931
335
+ raise SpecificationError("nested renamer is not supported")
336
+ else:
337
+ # GH#50684 - This accidentally worked in 1.x
338
+ msg = (
339
+ "Passing a dictionary to SeriesGroupBy.agg is deprecated "
340
+ "and will raise in a future version of pandas. Pass a list "
341
+ "of aggregations instead."
342
+ )
343
+ warnings.warn(
344
+ message=msg,
345
+ category=FutureWarning,
346
+ stacklevel=find_stack_level(),
347
+ )
348
+ arg = list(arg.items())
349
+ elif any(isinstance(x, (tuple, list)) for x in arg):
350
+ arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
351
+ else:
352
+ # list of functions / function names
353
+ columns = (com.get_callable_name(f) or f for f in arg)
354
+ arg = zip(columns, arg)
355
+
356
+ results: dict[base.OutputKey, DataFrame | Series] = {}
357
+ with com.temp_setattr(self, "as_index", True):
358
+ # Combine results using the index, need to adjust index after
359
+ # if as_index=False (GH#50724)
360
+ for idx, (name, func) in enumerate(arg):
361
+ key = base.OutputKey(label=name, position=idx)
362
+ results[key] = self.aggregate(func, *args, **kwargs)
363
+
364
+ if any(isinstance(x, DataFrame) for x in results.values()):
365
+ from pandas import concat
366
+
367
+ res_df = concat(
368
+ results.values(), axis=1, keys=[key.label for key in results]
369
+ )
370
+ return res_df
371
+
372
+ indexed_output = {key.position: val for key, val in results.items()}
373
+ output = self.obj._constructor_expanddim(indexed_output, index=None)
374
+ output.columns = Index(key.label for key in results)
375
+
376
+ return output
377
+
378
+ def _wrap_applied_output(
379
+ self,
380
+ data: Series,
381
+ values: list[Any],
382
+ not_indexed_same: bool = False,
383
+ is_transform: bool = False,
384
+ ) -> DataFrame | Series:
385
+ """
386
+ Wrap the output of SeriesGroupBy.apply into the expected result.
387
+
388
+ Parameters
389
+ ----------
390
+ data : Series
391
+ Input data for groupby operation.
392
+ values : List[Any]
393
+ Applied output for each group.
394
+ not_indexed_same : bool, default False
395
+ Whether the applied outputs are not indexed the same as the group axes.
396
+
397
+ Returns
398
+ -------
399
+ DataFrame or Series
400
+ """
401
+ if len(values) == 0:
402
+ # GH #6265
403
+ if is_transform:
404
+ # GH#47787 see test_group_on_empty_multiindex
405
+ res_index = data.index
406
+ else:
407
+ res_index = self._grouper.result_index
408
+
409
+ return self.obj._constructor(
410
+ [],
411
+ name=self.obj.name,
412
+ index=res_index,
413
+ dtype=data.dtype,
414
+ )
415
+ assert values is not None
416
+
417
+ if isinstance(values[0], dict):
418
+ # GH #823 #24880
419
+ index = self._grouper.result_index
420
+ res_df = self.obj._constructor_expanddim(values, index=index)
421
+ res_df = self._reindex_output(res_df)
422
+ # if self.observed is False,
423
+ # keep all-NaN rows created while re-indexing
424
+ res_ser = res_df.stack(future_stack=True)
425
+ res_ser.name = self.obj.name
426
+ return res_ser
427
+ elif isinstance(values[0], (Series, DataFrame)):
428
+ result = self._concat_objects(
429
+ values,
430
+ not_indexed_same=not_indexed_same,
431
+ is_transform=is_transform,
432
+ )
433
+ if isinstance(result, Series):
434
+ result.name = self.obj.name
435
+ if not self.as_index and not_indexed_same:
436
+ result = self._insert_inaxis_grouper(result)
437
+ result.index = default_index(len(result))
438
+ return result
439
+ else:
440
+ # GH #6265 #24880
441
+ result = self.obj._constructor(
442
+ data=values, index=self._grouper.result_index, name=self.obj.name
443
+ )
444
+ if not self.as_index:
445
+ result = self._insert_inaxis_grouper(result)
446
+ result.index = default_index(len(result))
447
+ return self._reindex_output(result)
448
+
449
+ def _aggregate_named(self, func, *args, **kwargs):
450
+ # Note: this is very similar to _aggregate_series_pure_python,
451
+ # but that does not pin group.name
452
+ result = {}
453
+ initialized = False
454
+
455
+ for name, group in self._grouper.get_iterator(
456
+ self._obj_with_exclusions, axis=self.axis
457
+ ):
458
+ # needed for pandas/tests/groupby/test_groupby.py::test_basic_aggregations
459
+ object.__setattr__(group, "name", name)
460
+
461
+ output = func(group, *args, **kwargs)
462
+ output = ops.extract_result(output)
463
+ if not initialized:
464
+ # We only do this validation on the first iteration
465
+ ops.check_result_array(output, group.dtype)
466
+ initialized = True
467
+ result[name] = output
468
+
469
+ return result
470
+
471
+ __examples_series_doc = dedent(
472
+ """
473
+ >>> ser = pd.Series([390.0, 350.0, 30.0, 20.0],
474
+ ... index=["Falcon", "Falcon", "Parrot", "Parrot"],
475
+ ... name="Max Speed")
476
+ >>> grouped = ser.groupby([1, 1, 2, 2])
477
+ >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
478
+ Falcon 0.707107
479
+ Falcon -0.707107
480
+ Parrot 0.707107
481
+ Parrot -0.707107
482
+ Name: Max Speed, dtype: float64
483
+
484
+ Broadcast result of the transformation
485
+
486
+ >>> grouped.transform(lambda x: x.max() - x.min())
487
+ Falcon 40.0
488
+ Falcon 40.0
489
+ Parrot 10.0
490
+ Parrot 10.0
491
+ Name: Max Speed, dtype: float64
492
+
493
+ >>> grouped.transform("mean")
494
+ Falcon 370.0
495
+ Falcon 370.0
496
+ Parrot 25.0
497
+ Parrot 25.0
498
+ Name: Max Speed, dtype: float64
499
+
500
+ .. versionchanged:: 1.3.0
501
+
502
+ The resulting dtype will reflect the return value of the passed ``func``,
503
+ for example:
504
+
505
+ >>> grouped.transform(lambda x: x.astype(int).max())
506
+ Falcon 390
507
+ Falcon 390
508
+ Parrot 30
509
+ Parrot 30
510
+ Name: Max Speed, dtype: int64
511
+ """
512
+ )
513
+
514
+ @Substitution(klass="Series", example=__examples_series_doc)
515
+ @Appender(_transform_template)
516
+ def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
517
+ return self._transform(
518
+ func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
519
+ )
520
+
521
+ def _cython_transform(
522
+ self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs
523
+ ):
524
+ assert axis == 0 # handled by caller
525
+
526
+ obj = self._obj_with_exclusions
527
+
528
+ try:
529
+ result = self._grouper._cython_operation(
530
+ "transform", obj._values, how, axis, **kwargs
531
+ )
532
+ except NotImplementedError as err:
533
+ # e.g. test_groupby_raises_string
534
+ raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
535
+
536
+ return obj._constructor(result, index=self.obj.index, name=obj.name)
537
+
538
+ def _transform_general(
539
+ self, func: Callable, engine, engine_kwargs, *args, **kwargs
540
+ ) -> Series:
541
+ """
542
+ Transform with a callable `func`.
543
+ """
544
+ if maybe_use_numba(engine):
545
+ return self._transform_with_numba(
546
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
547
+ )
548
+ assert callable(func)
549
+ klass = type(self.obj)
550
+
551
+ results = []
552
+ for name, group in self._grouper.get_iterator(
553
+ self._obj_with_exclusions, axis=self.axis
554
+ ):
555
+ # this setattr is needed for test_transform_lambda_with_datetimetz
556
+ object.__setattr__(group, "name", name)
557
+ res = func(group, *args, **kwargs)
558
+
559
+ results.append(klass(res, index=group.index))
560
+
561
+ # check for empty "results" to avoid concat ValueError
562
+ if results:
563
+ from pandas.core.reshape.concat import concat
564
+
565
+ concatenated = concat(results)
566
+ result = self._set_result_index_ordered(concatenated)
567
+ else:
568
+ result = self.obj._constructor(dtype=np.float64)
569
+
570
+ result.name = self.obj.name
571
+ return result
572
+
573
+ def filter(self, func, dropna: bool = True, *args, **kwargs):
574
+ """
575
+ Filter elements from groups that don't satisfy a criterion.
576
+
577
+ Elements from groups are filtered if they do not satisfy the
578
+ boolean criterion specified by func.
579
+
580
+ Parameters
581
+ ----------
582
+ func : function
583
+ Criterion to apply to each group. Should return True or False.
584
+ dropna : bool
585
+ Drop groups that do not pass the filter. True by default; if False,
586
+ groups that evaluate False are filled with NaNs.
587
+
588
+ Returns
589
+ -------
590
+ Series
591
+
592
+ Notes
593
+ -----
594
+ Functions that mutate the passed object can produce unexpected
595
+ behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
596
+ for more details.
597
+
598
+ Examples
599
+ --------
600
+ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
601
+ ... 'foo', 'bar'],
602
+ ... 'B' : [1, 2, 3, 4, 5, 6],
603
+ ... 'C' : [2.0, 5., 8., 1., 2., 9.]})
604
+ >>> grouped = df.groupby('A')
605
+ >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
606
+ 1 2
607
+ 3 4
608
+ 5 6
609
+ Name: B, dtype: int64
610
+ """
611
+ if isinstance(func, str):
612
+ wrapper = lambda x: getattr(x, func)(*args, **kwargs)
613
+ else:
614
+ wrapper = lambda x: func(x, *args, **kwargs)
615
+
616
+ # Interpret np.nan as False.
617
+ def true_and_notna(x) -> bool:
618
+ b = wrapper(x)
619
+ return notna(b) and b
620
+
621
+ try:
622
+ indices = [
623
+ self._get_index(name)
624
+ for name, group in self._grouper.get_iterator(
625
+ self._obj_with_exclusions, axis=self.axis
626
+ )
627
+ if true_and_notna(group)
628
+ ]
629
+ except (ValueError, TypeError) as err:
630
+ raise TypeError("the filter must return a boolean result") from err
631
+
632
+ filtered = self._apply_filter(indices, dropna)
633
+ return filtered
634
+
635
+ def nunique(self, dropna: bool = True) -> Series | DataFrame:
636
+ """
637
+ Return number of unique elements in the group.
638
+
639
+ Returns
640
+ -------
641
+ Series
642
+ Number of unique values within each group.
643
+
644
+ Examples
645
+ --------
646
+ For SeriesGroupby:
647
+
648
+ >>> lst = ['a', 'a', 'b', 'b']
649
+ >>> ser = pd.Series([1, 2, 3, 3], index=lst)
650
+ >>> ser
651
+ a 1
652
+ a 2
653
+ b 3
654
+ b 3
655
+ dtype: int64
656
+ >>> ser.groupby(level=0).nunique()
657
+ a 2
658
+ b 1
659
+ dtype: int64
660
+
661
+ For Resampler:
662
+
663
+ >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex(
664
+ ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
665
+ >>> ser
666
+ 2023-01-01 1
667
+ 2023-01-15 2
668
+ 2023-02-01 3
669
+ 2023-02-15 3
670
+ dtype: int64
671
+ >>> ser.resample('MS').nunique()
672
+ 2023-01-01 2
673
+ 2023-02-01 1
674
+ Freq: MS, dtype: int64
675
+ """
676
+ ids, _, ngroups = self._grouper.group_info
677
+ val = self.obj._values
678
+ codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)
679
+
680
+ if self._grouper.has_dropped_na:
681
+ mask = ids >= 0
682
+ ids = ids[mask]
683
+ codes = codes[mask]
684
+
685
+ group_index = get_group_index(
686
+ labels=[ids, codes],
687
+ shape=(ngroups, len(uniques)),
688
+ sort=False,
689
+ xnull=dropna,
690
+ )
691
+
692
+ if dropna:
693
+ mask = group_index >= 0
694
+ if (~mask).any():
695
+ ids = ids[mask]
696
+ group_index = group_index[mask]
697
+
698
+ mask = duplicated(group_index, "first")
699
+ res = np.bincount(ids[~mask], minlength=ngroups)
700
+ res = ensure_int64(res)
701
+
702
+ ri = self._grouper.result_index
703
+ result: Series | DataFrame = self.obj._constructor(
704
+ res, index=ri, name=self.obj.name
705
+ )
706
+ if not self.as_index:
707
+ result = self._insert_inaxis_grouper(result)
708
+ result.index = default_index(len(result))
709
+ return self._reindex_output(result, fill_value=0)
710
+
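The body above counts distinct (group, value) pairs via factorize, get_group_index and a bincount; a rough numpy-only sketch of that counting idea, under simplified assumptions (toy data, no NA handling):

    import numpy as np
    ids = np.array([0, 0, 0, 1, 1])    # group id per row
    codes = np.array([0, 1, 1, 2, 2])  # factorized value per row
    ngroups = 2
    keys = ids * (codes.max() + 1) + codes           # one key per (group, value) pair
    _, first_idx = np.unique(keys, return_index=True)
    counts = np.bincount(ids[first_idx], minlength=ngroups)
    print(counts)  # [2 1]: group 0 has 2 distinct values, group 1 has 1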
711
+ @doc(Series.describe)
712
+ def describe(self, percentiles=None, include=None, exclude=None) -> Series:
713
+ return super().describe(
714
+ percentiles=percentiles, include=include, exclude=exclude
715
+ )
716
+
717
+ def value_counts(
718
+ self,
719
+ normalize: bool = False,
720
+ sort: bool = True,
721
+ ascending: bool = False,
722
+ bins=None,
723
+ dropna: bool = True,
724
+ ) -> Series | DataFrame:
725
+ name = "proportion" if normalize else "count"
726
+
727
+ if bins is None:
728
+ result = self._value_counts(
729
+ normalize=normalize, sort=sort, ascending=ascending, dropna=dropna
730
+ )
731
+ result.name = name
732
+ return result
733
+
734
+ from pandas.core.reshape.merge import get_join_indexers
735
+ from pandas.core.reshape.tile import cut
736
+
737
+ ids, _, _ = self._grouper.group_info
738
+ val = self.obj._values
739
+
740
+ index_names = self._grouper.names + [self.obj.name]
741
+
742
+ if isinstance(val.dtype, CategoricalDtype) or (
743
+ bins is not None and not np.iterable(bins)
744
+ ):
745
+ # scalar bins cannot be done at top level
746
+ # in a backward compatible way
747
+ # GH38672 relates to categorical dtype
748
+ ser = self.apply(
749
+ Series.value_counts,
750
+ normalize=normalize,
751
+ sort=sort,
752
+ ascending=ascending,
753
+ bins=bins,
754
+ )
755
+ ser.name = name
756
+ ser.index.names = index_names
757
+ return ser
758
+
759
+ # groupby removes null keys from groupings
760
+ mask = ids != -1
761
+ ids, val = ids[mask], val[mask]
762
+
763
+ lab: Index | np.ndarray
764
+ if bins is None:
765
+ lab, lev = algorithms.factorize(val, sort=True)
766
+ llab = lambda lab, inc: lab[inc]
767
+ else:
768
+ # lab is a Categorical with categories an IntervalIndex
769
+ cat_ser = cut(Series(val, copy=False), bins, include_lowest=True)
770
+ cat_obj = cast("Categorical", cat_ser._values)
771
+ lev = cat_obj.categories
772
+ lab = lev.take(
773
+ cat_obj.codes,
774
+ allow_fill=True,
775
+ fill_value=lev._na_value,
776
+ )
777
+ llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
778
+
779
+ if isinstance(lab.dtype, IntervalDtype):
780
+ # TODO: should we do this inside II?
781
+ lab_interval = cast(Interval, lab)
782
+
783
+ sorter = np.lexsort((lab_interval.left, lab_interval.right, ids))
784
+ else:
785
+ sorter = np.lexsort((lab, ids))
786
+
787
+ ids, lab = ids[sorter], lab[sorter]
788
+
789
+ # group boundaries are where group ids change
790
+ idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
791
+ idx = np.r_[0, idchanges]
792
+ if not len(ids):
793
+ idx = idchanges
794
+
795
+ # new values are where sorted labels change
796
+ lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
797
+ inc = np.r_[True, lchanges]
798
+ if not len(val):
799
+ inc = lchanges
800
+ inc[idx] = True # group boundaries are also new values
801
+ out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
802
+
803
+ # num. of times each group should be repeated
804
+ rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
805
+
806
+ # multi-index components
807
+ codes = self._grouper.reconstructed_codes
808
+ codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
809
+ levels = [ping._group_index for ping in self._grouper.groupings] + [lev]
810
+
811
+ if dropna:
812
+ mask = codes[-1] != -1
813
+ if mask.all():
814
+ dropna = False
815
+ else:
816
+ out, codes = out[mask], [level_codes[mask] for level_codes in codes]
817
+
818
+ if normalize:
819
+ out = out.astype("float")
820
+ d = np.diff(np.r_[idx, len(ids)])
821
+ if dropna:
822
+ m = ids[lab == -1]
823
+ np.add.at(d, m, -1)
824
+ acc = rep(d)[mask]
825
+ else:
826
+ acc = rep(d)
827
+ out /= acc
828
+
829
+ if sort and bins is None:
830
+ cat = ids[inc][mask] if dropna else ids[inc]
831
+ sorter = np.lexsort((out if ascending else -out, cat))
832
+ out, codes[-1] = out[sorter], codes[-1][sorter]
833
+
834
+ if bins is not None:
835
+ # for compat. with libgroupby.value_counts need to ensure every
836
+ # bin is present at every index level, null filled with zeros
837
+ diff = np.zeros(len(out), dtype="bool")
838
+ for level_codes in codes[:-1]:
839
+ diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
840
+
841
+ ncat, nbin = diff.sum(), len(levels[-1])
842
+
843
+ left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
844
+
845
+ right = [diff.cumsum() - 1, codes[-1]]
846
+
847
+ # error: Argument 1 to "get_join_indexers" has incompatible type
848
+ # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray,
849
+ # ndarray[Any, Any]], Index, Series]]
850
+ _, idx = get_join_indexers(
851
+ left, right, sort=False, how="left" # type: ignore[arg-type]
852
+ )
853
+ if idx is not None:
854
+ out = np.where(idx != -1, out[idx], 0)
855
+
856
+ if sort:
857
+ sorter = np.lexsort((out if ascending else -out, left[0]))
858
+ out, left[-1] = out[sorter], left[-1][sorter]
859
+
860
+ # build the multi-index w/ full levels
861
+ def build_codes(lev_codes: np.ndarray) -> np.ndarray:
862
+ return np.repeat(lev_codes[diff], nbin)
863
+
864
+ codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
865
+ codes.append(left[-1])
866
+
867
+ mi = MultiIndex(
868
+ levels=levels, codes=codes, names=index_names, verify_integrity=False
869
+ )
870
+
871
+ if is_integer_dtype(out.dtype):
872
+ out = ensure_int64(out)
873
+ result = self.obj._constructor(out, index=mi, name=name)
874
+ if not self.as_index:
875
+ result = result.reset_index()
876
+ return result
877
+
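This method carries no docstring in the diff, so a brief hedged usage sketch may help (toy data; output layout as of pandas 2.x, where the result is named 'count' unless ``normalize=True``):

    import pandas as pd
    ser = pd.Series([1, 1, 2, 3, 3, 3], index=['a', 'a', 'a', 'b', 'b', 'b'])
    ser.groupby(level=0).value_counts()
    # a  1    2
    #    2    1
    # b  3    3
    # Name: count, dtype: int64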
878
+ def fillna(
879
+ self,
880
+ value: object | ArrayLike | None = None,
881
+ method: FillnaOptions | None = None,
882
+ axis: Axis | None | lib.NoDefault = lib.no_default,
883
+ inplace: bool = False,
884
+ limit: int | None = None,
885
+ downcast: dict | None | lib.NoDefault = lib.no_default,
886
+ ) -> Series | None:
887
+ """
888
+ Fill NA/NaN values using the specified method within groups.
889
+
890
+ .. deprecated:: 2.2.0
891
+ This method is deprecated and will be removed in a future version.
892
+ Use the :meth:`.SeriesGroupBy.ffill` or :meth:`.SeriesGroupBy.bfill`
893
+ for forward or backward filling instead. If you want to fill with a
894
+ single value, use :meth:`Series.fillna` instead.
895
+
896
+ Parameters
897
+ ----------
898
+ value : scalar, dict, Series, or DataFrame
899
+ Value to use to fill holes (e.g. 0), alternately a
900
+ dict/Series/DataFrame of values specifying which value to use for
901
+ each index (for a Series) or column (for a DataFrame). Values not
902
+ in the dict/Series/DataFrame will not be filled. This value cannot
903
+ be a list. Users wanting to use the ``value`` argument and not ``method``
904
+ should prefer :meth:`.Series.fillna` as this
905
+ will produce the same result and be more performant.
906
+ method : {{'bfill', 'ffill', None}}, default None
907
+ Method to use for filling holes. ``'ffill'`` will propagate
908
+ the last valid observation forward within a group.
909
+ ``'bfill'`` will use next valid observation to fill the gap.
910
+ axis : {0 or 'index', 1 or 'columns'}
911
+ Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`.
912
+ inplace : bool, default False
913
+ Broken. Do not set to True.
914
+ limit : int, default None
915
+ If method is specified, this is the maximum number of consecutive
916
+ NaN values to forward/backward fill within a group. In other words,
917
+ if there is a gap with more than this number of consecutive NaNs,
918
+ it will only be partially filled. If method is not specified, this is the
919
+ maximum number of entries along the entire axis where NaNs will be
920
+ filled. Must be greater than 0 if not None.
921
+ downcast : dict, default is None
922
+ A dict of item->dtype of what to downcast if possible,
923
+ or the string 'infer' which will try to downcast to an appropriate
924
+ equal type (e.g. float64 to int64 if possible).
925
+
926
+ Returns
927
+ -------
928
+ Series
929
+ Object with missing values filled within groups.
930
+
931
+ See Also
932
+ --------
933
+ ffill : Forward fill values within a group.
934
+ bfill : Backward fill values within a group.
935
+
936
+ Examples
937
+ --------
938
+ For SeriesGroupBy:
939
+
940
+ >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse']
941
+ >>> ser = pd.Series([1, None, None, 2, None], index=lst)
942
+ >>> ser
943
+ cat 1.0
944
+ cat NaN
945
+ cat NaN
946
+ mouse 2.0
947
+ mouse NaN
948
+ dtype: float64
949
+ >>> ser.groupby(level=0).fillna(0, limit=1)
950
+ cat 1.0
951
+ cat 0.0
952
+ cat NaN
953
+ mouse 2.0
954
+ mouse 0.0
955
+ dtype: float64
956
+ """
957
+ warnings.warn(
958
+ f"{type(self).__name__}.fillna is deprecated and "
959
+ "will be removed in a future version. Use obj.ffill() or obj.bfill() "
960
+ "for forward or backward filling instead. If you want to fill with a "
961
+ f"single value, use {type(self.obj).__name__}.fillna instead",
962
+ FutureWarning,
963
+ stacklevel=find_stack_level(),
964
+ )
965
+ result = self._op_via_apply(
966
+ "fillna",
967
+ value=value,
968
+ method=method,
969
+ axis=axis,
970
+ inplace=inplace,
971
+ limit=limit,
972
+ downcast=downcast,
973
+ )
974
+ return result
975
+
976
+ def take(
977
+ self,
978
+ indices: TakeIndexer,
979
+ axis: Axis | lib.NoDefault = lib.no_default,
980
+ **kwargs,
981
+ ) -> Series:
982
+ """
983
+ Return the elements in the given *positional* indices in each group.
984
+
985
+ This means that we are not indexing according to actual values in
986
+ the index attribute of the object. We are indexing according to the
987
+ actual position of the element in the object.
988
+
989
+ If a requested index does not exist for some group, this method will raise.
990
+ To get similar behavior that ignores indices that don't exist, see
991
+ :meth:`.SeriesGroupBy.nth`.
992
+
993
+ Parameters
994
+ ----------
995
+ indices : array-like
996
+ An array of ints indicating which positions to take in each group.
997
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
998
+ The axis on which to select elements. ``0`` means that we are
999
+ selecting rows, ``1`` means that we are selecting columns.
1000
+ For `SeriesGroupBy` this parameter is unused and defaults to 0.
1001
+
1002
+ .. deprecated:: 2.1.0
1003
+ For axis=1, operate on the underlying object instead. Otherwise
1004
+ the axis keyword is not necessary.
1005
+
1006
+ **kwargs
1007
+ For compatibility with :meth:`numpy.take`. Has no effect on the
1008
+ output.
1009
+
1010
+ Returns
1011
+ -------
1012
+ Series
1013
+ A Series containing the elements taken from each group.
1014
+
1015
+ See Also
1016
+ --------
1017
+ Series.take : Take elements from a Series along an axis.
1018
+ Series.loc : Select a subset of a DataFrame by labels.
1019
+ Series.iloc : Select a subset of a DataFrame by positions.
1020
+ numpy.take : Take elements from an array along an axis.
1021
+ SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist.
1022
+
1023
+ Examples
1024
+ --------
1025
+ >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
1026
+ ... ('parrot', 'bird', 24.0),
1027
+ ... ('lion', 'mammal', 80.5),
1028
+ ... ('monkey', 'mammal', np.nan),
1029
+ ... ('rabbit', 'mammal', 15.0)],
1030
+ ... columns=['name', 'class', 'max_speed'],
1031
+ ... index=[4, 3, 2, 1, 0])
1032
+ >>> df
1033
+ name class max_speed
1034
+ 4 falcon bird 389.0
1035
+ 3 parrot bird 24.0
1036
+ 2 lion mammal 80.5
1037
+ 1 monkey mammal NaN
1038
+ 0 rabbit mammal 15.0
1039
+ >>> gb = df["name"].groupby([1, 1, 2, 2, 2])
1040
+
1041
+ Take elements at positions 0 and 1 along axis 0 in each group (default).
1042
+
1043
+ >>> gb.take([0, 1])
1044
+ 1 4 falcon
1045
+ 3 parrot
1046
+ 2 2 lion
1047
+ 1 monkey
1048
+ Name: name, dtype: object
1049
+
1050
+ We may take elements using negative integers, which count from the
1051
+ end of the object, just like with Python lists.
1052
+
1053
+ >>> gb.take([-1, -2])
1054
+ 1 3 parrot
1055
+ 4 falcon
1056
+ 2 0 rabbit
1057
+ 1 monkey
1058
+ Name: name, dtype: object
1059
+ """
1060
+ result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)
1061
+ return result
1062
+
1063
+ def skew(
1064
+ self,
1065
+ axis: Axis | lib.NoDefault = lib.no_default,
1066
+ skipna: bool = True,
1067
+ numeric_only: bool = False,
1068
+ **kwargs,
1069
+ ) -> Series:
1070
+ """
1071
+ Return unbiased skew within groups.
1072
+
1073
+ Normalized by N-1.
1074
+
1075
+ Parameters
1076
+ ----------
1077
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
1078
+ Axis for the function to be applied on.
1079
+ This parameter is only for compatibility with DataFrame and is unused.
1080
+
1081
+ .. deprecated:: 2.1.0
1082
+ For axis=1, operate on the underlying object instead. Otherwise
1083
+ the axis keyword is not necessary.
1084
+
1085
+ skipna : bool, default True
1086
+ Exclude NA/null values when computing the result.
1087
+
1088
+ numeric_only : bool, default False
1089
+ Include only float, int, boolean columns. Not implemented for Series.
1090
+
1091
+ **kwargs
1092
+ Additional keyword arguments to be passed to the function.
1093
+
1094
+ Returns
1095
+ -------
1096
+ Series
1097
+
1098
+ See Also
1099
+ --------
1100
+ Series.skew : Return unbiased skew over requested axis.
1101
+
1102
+ Examples
1103
+ --------
1104
+ >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.],
1105
+ ... index=['Falcon', 'Falcon', 'Falcon', 'Falcon',
1106
+ ... 'Parrot', 'Parrot', 'Parrot'],
1107
+ ... name="Max Speed")
1108
+ >>> ser
1109
+ Falcon 390.0
1110
+ Falcon 350.0
1111
+ Falcon 357.0
1112
+ Falcon NaN
1113
+ Parrot 22.0
1114
+ Parrot 20.0
1115
+ Parrot 30.0
1116
+ Name: Max Speed, dtype: float64
1117
+ >>> ser.groupby(level=0).skew()
1118
+ Falcon 1.525174
1119
+ Parrot 1.457863
1120
+ Name: Max Speed, dtype: float64
1121
+ >>> ser.groupby(level=0).skew(skipna=False)
1122
+ Falcon NaN
1123
+ Parrot 1.457863
1124
+ Name: Max Speed, dtype: float64
1125
+ """
1126
+ if axis is lib.no_default:
1127
+ axis = 0
1128
+
1129
+ if axis != 0:
1130
+ result = self._op_via_apply(
1131
+ "skew",
1132
+ axis=axis,
1133
+ skipna=skipna,
1134
+ numeric_only=numeric_only,
1135
+ **kwargs,
1136
+ )
1137
+ return result
1138
+
1139
+ def alt(obj):
1140
+ # This should not be reached since the cython path should raise
1141
+ # TypeError and not NotImplementedError.
1142
+ raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")
1143
+
1144
+ return self._cython_agg_general(
1145
+ "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
1146
+ )
1147
+
1148
+ @property
1149
+ @doc(Series.plot.__doc__)
1150
+ def plot(self) -> GroupByPlot:
1151
+ result = GroupByPlot(self)
1152
+ return result
1153
+
1154
+ @doc(Series.nlargest.__doc__)
1155
+ def nlargest(
1156
+ self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
1157
+ ) -> Series:
1158
+ f = partial(Series.nlargest, n=n, keep=keep)
1159
+ data = self._obj_with_exclusions
1160
+ # Don't change behavior if result index happens to be the same, i.e.
1161
+ # already ordered and n >= all group sizes.
1162
+ result = self._python_apply_general(f, data, not_indexed_same=True)
1163
+ return result
1164
+
1165
+ @doc(Series.nsmallest.__doc__)
1166
+ def nsmallest(
1167
+ self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
1168
+ ) -> Series:
1169
+ f = partial(Series.nsmallest, n=n, keep=keep)
1170
+ data = self._obj_with_exclusions
1171
+ # Don't change behavior if result index happens to be the same, i.e.
1172
+ # already ordered and n >= all group sizes.
1173
+ result = self._python_apply_general(f, data, not_indexed_same=True)
1174
+ return result
1175
+
1176
+ @doc(Series.idxmin.__doc__)
1177
+ def idxmin(
1178
+ self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True
1179
+ ) -> Series:
1180
+ return self._idxmax_idxmin("idxmin", axis=axis, skipna=skipna)
1181
+
1182
+ @doc(Series.idxmax.__doc__)
1183
+ def idxmax(
1184
+ self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True
1185
+ ) -> Series:
1186
+ return self._idxmax_idxmin("idxmax", axis=axis, skipna=skipna)
1187
+
1188
+ @doc(Series.corr.__doc__)
1189
+ def corr(
1190
+ self,
1191
+ other: Series,
1192
+ method: CorrelationMethod = "pearson",
1193
+ min_periods: int | None = None,
1194
+ ) -> Series:
1195
+ result = self._op_via_apply(
1196
+ "corr", other=other, method=method, min_periods=min_periods
1197
+ )
1198
+ return result
1199
+
1200
+ @doc(Series.cov.__doc__)
1201
+ def cov(
1202
+ self, other: Series, min_periods: int | None = None, ddof: int | None = 1
1203
+ ) -> Series:
1204
+ result = self._op_via_apply(
1205
+ "cov", other=other, min_periods=min_periods, ddof=ddof
1206
+ )
1207
+ return result
1208
+
1209
+ @property
1210
+ def is_monotonic_increasing(self) -> Series:
1211
+ """
1212
+ Return whether each group's values are monotonically increasing.
1213
+
1214
+ Returns
1215
+ -------
1216
+ Series
1217
+
1218
+ Examples
1219
+ --------
1220
+ >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
1221
+ >>> s.groupby(level=0).is_monotonic_increasing
1222
+ Falcon False
1223
+ Parrot True
1224
+ dtype: bool
1225
+ """
1226
+ return self.apply(lambda ser: ser.is_monotonic_increasing)
1227
+
1228
+ @property
1229
+ def is_monotonic_decreasing(self) -> Series:
1230
+ """
1231
+ Return whether each group's values are monotonically decreasing.
1232
+
1233
+ Returns
1234
+ -------
1235
+ Series
1236
+
1237
+ Examples
1238
+ --------
1239
+ >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
1240
+ >>> s.groupby(level=0).is_monotonic_decreasing
1241
+ Falcon True
1242
+ Parrot False
1243
+ dtype: bool
1244
+ """
1245
+ return self.apply(lambda ser: ser.is_monotonic_decreasing)
1246
+
1247
+ @doc(Series.hist.__doc__)
1248
+ def hist(
1249
+ self,
1250
+ by=None,
1251
+ ax=None,
1252
+ grid: bool = True,
1253
+ xlabelsize: int | None = None,
1254
+ xrot: float | None = None,
1255
+ ylabelsize: int | None = None,
1256
+ yrot: float | None = None,
1257
+ figsize: tuple[int, int] | None = None,
1258
+ bins: int | Sequence[int] = 10,
1259
+ backend: str | None = None,
1260
+ legend: bool = False,
1261
+ **kwargs,
1262
+ ):
1263
+ result = self._op_via_apply(
1264
+ "hist",
1265
+ by=by,
1266
+ ax=ax,
1267
+ grid=grid,
1268
+ xlabelsize=xlabelsize,
1269
+ xrot=xrot,
1270
+ ylabelsize=ylabelsize,
1271
+ yrot=yrot,
1272
+ figsize=figsize,
1273
+ bins=bins,
1274
+ backend=backend,
1275
+ legend=legend,
1276
+ **kwargs,
1277
+ )
1278
+ return result
1279
+
1280
+ @property
1281
+ @doc(Series.dtype.__doc__)
1282
+ def dtype(self) -> Series:
1283
+ return self.apply(lambda ser: ser.dtype)
1284
+
1285
+ def unique(self) -> Series:
1286
+ """
1287
+ Return unique values for each group.
1288
+
1289
+ It returns unique values for each of the grouped values. Returned in
1290
+ order of appearance. Hash table-based unique, therefore does NOT sort.
1291
+
1292
+ Returns
1293
+ -------
1294
+ Series
1295
+ Unique values for each of the grouped values.
1296
+
1297
+ See Also
1298
+ --------
1299
+ Series.unique : Return unique values of Series object.
1300
+
1301
+ Examples
1302
+ --------
1303
+ >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1),
1304
+ ... ('Beagle', 'dog', 15.2),
1305
+ ... ('Chihuahua', 'dog', 6.9),
1306
+ ... ('Persian', 'cat', 9.2),
1307
+ ... ('Chihuahua', 'dog', 7),
1308
+ ... ('Persian', 'cat', 8.8)],
1309
+ ... columns=['breed', 'animal', 'height_in'])
1310
+ >>> df
1311
+ breed animal height_in
1312
+ 0 Chihuahua dog 6.1
1313
+ 1 Beagle dog 15.2
1314
+ 2 Chihuahua dog 6.9
1315
+ 3 Persian cat 9.2
1316
+ 4 Chihuahua dog 7.0
1317
+ 5 Persian cat 8.8
1318
+ >>> ser = df.groupby('animal')['breed'].unique()
1319
+ >>> ser
1320
+ animal
1321
+ cat [Persian]
1322
+ dog [Chihuahua, Beagle]
1323
+ Name: breed, dtype: object
1324
+ """
1325
+ result = self._op_via_apply("unique")
1326
+ return result
1327
+
1328
+
1329
+ class DataFrameGroupBy(GroupBy[DataFrame]):
1330
+ _agg_examples_doc = dedent(
1331
+ """
1332
+ Examples
1333
+ --------
1334
+ >>> data = {"A": [1, 1, 2, 2],
1335
+ ... "B": [1, 2, 3, 4],
1336
+ ... "C": [0.362838, 0.227877, 1.267767, -0.562860]}
1337
+ >>> df = pd.DataFrame(data)
1338
+ >>> df
1339
+ A B C
1340
+ 0 1 1 0.362838
1341
+ 1 1 2 0.227877
1342
+ 2 2 3 1.267767
1343
+ 3 2 4 -0.562860
1344
+
1345
+ The aggregation is for each column.
1346
+
1347
+ >>> df.groupby('A').agg('min')
1348
+ B C
1349
+ A
1350
+ 1 1 0.227877
1351
+ 2 3 -0.562860
1352
+
1353
+ Multiple aggregations
1354
+
1355
+ >>> df.groupby('A').agg(['min', 'max'])
1356
+ B C
1357
+ min max min max
1358
+ A
1359
+ 1 1 2 0.227877 0.362838
1360
+ 2 3 4 -0.562860 1.267767
1361
+
1362
+ Select a column for aggregation
1363
+
1364
+ >>> df.groupby('A').B.agg(['min', 'max'])
1365
+ min max
1366
+ A
1367
+ 1 1 2
1368
+ 2 3 4
1369
+
1370
+ User-defined function for aggregation
1371
+
1372
+ >>> df.groupby('A').agg(lambda x: sum(x) + 2)
1373
+ B C
1374
+ A
1375
+ 1 5 2.590715
1376
+ 2 9 2.704907
1377
+
1378
+ Different aggregations per column
1379
+
1380
+ >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
1381
+ B C
1382
+ min max sum
1383
+ A
1384
+ 1 1 2 0.590715
1385
+ 2 3 4 0.704907
1386
+
1387
+ To control the output names with different aggregations per column,
1388
+ pandas supports "named aggregation"
1389
+
1390
+ >>> df.groupby("A").agg(
1391
+ ... b_min=pd.NamedAgg(column="B", aggfunc="min"),
1392
+ ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")
1393
+ ... )
1394
+ b_min c_sum
1395
+ A
1396
+ 1 1 0.590715
1397
+ 2 3 0.704907
1398
+
1399
+ - The keywords are the *output* column names
1400
+ - The values are tuples whose first element is the column to select
1401
+ and the second element is the aggregation to apply to that column.
1402
+ Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
1403
+ ``['column', 'aggfunc']`` to make it clearer what the arguments are.
1404
+ As usual, the aggregation can be a callable or a string alias.
1405
+
1406
+ See :ref:`groupby.aggregate.named` for more.
1407
+
1408
+ .. versionchanged:: 1.3.0
1409
+
1410
+ The resulting dtype will reflect the return value of the aggregating function.
1411
+
1412
+ >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())
1413
+ B
1414
+ A
1415
+ 1 1.0
1416
+ 2 3.0
1417
+ """
1418
+ )
1419
+
1420
+ @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame")
1421
+ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
1422
+ relabeling, func, columns, order = reconstruct_func(func, **kwargs)
1423
+ func = maybe_mangle_lambdas(func)
1424
+
1425
+ if maybe_use_numba(engine):
1426
+ # Not all agg functions support numba, only propagate numba kwargs
1427
+ # if user asks for numba
1428
+ kwargs["engine"] = engine
1429
+ kwargs["engine_kwargs"] = engine_kwargs
1430
+
1431
+ op = GroupByApply(self, func, args=args, kwargs=kwargs)
1432
+ result = op.agg()
1433
+ if not is_dict_like(func) and result is not None:
1434
+ # GH #52849
1435
+ if not self.as_index and is_list_like(func):
1436
+ return result.reset_index()
1437
+ else:
1438
+ return result
1439
+ elif relabeling:
1440
+ # this should be the only (non-raising) case with relabeling
1441
+ # use the reordered index of columns
1442
+ result = cast(DataFrame, result)
1443
+ result = result.iloc[:, order]
1444
+ result = cast(DataFrame, result)
1445
+ # error: Incompatible types in assignment (expression has type
1446
+ # "Optional[List[str]]", variable has type
1447
+ # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]],
1448
+ # Index, Series], Sequence[Any]]")
1449
+ result.columns = columns # type: ignore[assignment]
1450
+
1451
+ if result is None:
1452
+ # Remove the kwargs we inserted
1453
+ # (already stored in engine, engine_kwargs arguments)
1454
+ if "engine" in kwargs:
1455
+ del kwargs["engine"]
1456
+ del kwargs["engine_kwargs"]
1457
+ # at this point func is not a str, list-like, dict-like,
1458
+ # or a known callable(e.g. sum)
1459
+ if maybe_use_numba(engine):
1460
+ return self._aggregate_with_numba(
1461
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
1462
+ )
1463
+ # grouper specific aggregations
1464
+ if self._grouper.nkeys > 1:
1465
+ # test_groupby_as_index_series_scalar gets here with 'not self.as_index'
1466
+ return self._python_agg_general(func, *args, **kwargs)
1467
+ elif args or kwargs:
1468
+ # test_pass_args_kwargs gets here (with and without as_index)
1469
+ # can't return early
1470
+ result = self._aggregate_frame(func, *args, **kwargs)
1471
+
1472
+ elif self.axis == 1:
1473
+ # _aggregate_multiple_funcs does not allow self.axis == 1
1474
+ # Note: axis == 1 precludes 'not self.as_index', see __init__
1475
+ result = self._aggregate_frame(func)
1476
+ return result
1477
+
1478
+ else:
1479
+ # try to treat as if we are passing a list
1480
+ gba = GroupByApply(self, [func], args=(), kwargs={})
1481
+ try:
1482
+ result = gba.agg()
1483
+
1484
+ except ValueError as err:
1485
+ if "No objects to concatenate" not in str(err):
1486
+ raise
1487
+ # _aggregate_frame can fail with e.g. func=Series.mode,
1488
+ # where it expects 1D values but would be getting 2D values
1489
+ # In other tests, using aggregate_frame instead of GroupByApply
1490
+ # would give correct values but incorrect dtypes
1491
+ # object vs float64 in test_cython_agg_empty_buckets
1492
+ # float64 vs int64 in test_category_order_apply
1493
+ result = self._aggregate_frame(func)
1494
+
1495
+ else:
1496
+ # GH#32040, GH#35246
1497
+ # e.g. test_groupby_as_index_select_column_sum_empty_df
1498
+ result = cast(DataFrame, result)
1499
+ result.columns = self._obj_with_exclusions.columns.copy()
1500
+
1501
+ if not self.as_index:
1502
+ result = self._insert_inaxis_grouper(result)
1503
+ result.index = default_index(len(result))
1504
+
1505
+ return result
1506
+
1507
+ agg = aggregate
1508
+
1509
+ def _python_agg_general(self, func, *args, **kwargs):
1510
+ orig_func = func
1511
+ func = com.is_builtin_func(func)
1512
+ if orig_func != func:
1513
+ alias = com._builtin_table_alias[func]
1514
+ warn_alias_replacement(self, orig_func, alias)
1515
+ f = lambda x: func(x, *args, **kwargs)
1516
+
1517
+ if self.ngroups == 0:
1518
+ # e.g. test_evaluate_with_empty_groups: a different path gets a different
1519
+ # result dtype in the empty case.
1520
+ return self._python_apply_general(f, self._selected_obj, is_agg=True)
1521
+
1522
+ obj = self._obj_with_exclusions
1523
+ if self.axis == 1:
1524
+ obj = obj.T
1525
+
1526
+ if not len(obj.columns):
1527
+ # e.g. test_margins_no_values_no_cols
1528
+ return self._python_apply_general(f, self._selected_obj)
1529
+
1530
+ output: dict[int, ArrayLike] = {}
1531
+ for idx, (name, ser) in enumerate(obj.items()):
1532
+ result = self._grouper.agg_series(ser, f)
1533
+ output[idx] = result
1534
+
1535
+ res = self.obj._constructor(output)
1536
+ res.columns = obj.columns.copy(deep=False)
1537
+ return self._wrap_aggregated_output(res)
1538
+
1539
+ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
1540
+ if self._grouper.nkeys != 1:
1541
+ raise AssertionError("Number of keys must be 1")
1542
+
1543
+ obj = self._obj_with_exclusions
1544
+
1545
+ result: dict[Hashable, NDFrame | np.ndarray] = {}
1546
+ for name, grp_df in self._grouper.get_iterator(obj, self.axis):
1547
+ fres = func(grp_df, *args, **kwargs)
1548
+ result[name] = fres
1549
+
1550
+ result_index = self._grouper.result_index
1551
+ other_ax = obj.axes[1 - self.axis]
1552
+ out = self.obj._constructor(result, index=other_ax, columns=result_index)
1553
+ if self.axis == 0:
1554
+ out = out.T
1555
+
1556
+ return out
1557
+
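A rough, simplified analogue of what ``_aggregate_frame`` builds for a single grouping key on ``axis=0`` (hypothetical data, not part of this diff): one reduced result per group, assembled with group labels as columns and then transposed so groups become rows.

    import pandas as pd
    df = pd.DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 3], "C": [4.0, 5.0, 6.0]})
    gb = df.groupby("A")[["B", "C"]]
    per_group = {name: grp.mean() for name, grp in gb}  # one reduced result per group
    out = pd.DataFrame(per_group).T                     # groups become rows after the transpose
    # out:
    #      B    C
    # x  1.5  4.5
    # y  3.0  6.0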
1558
+ def _wrap_applied_output(
1559
+ self,
1560
+ data: DataFrame,
1561
+ values: list,
1562
+ not_indexed_same: bool = False,
1563
+ is_transform: bool = False,
1564
+ ):
1565
+ if len(values) == 0:
1566
+ if is_transform:
1567
+ # GH#47787 see test_group_on_empty_multiindex
1568
+ res_index = data.index
1569
+ else:
1570
+ res_index = self._grouper.result_index
1571
+
1572
+ result = self.obj._constructor(index=res_index, columns=data.columns)
1573
+ result = result.astype(data.dtypes, copy=False)
1574
+ return result
1575
+
1576
+ # GH12824
1577
+ # using values[0] here breaks test_groupby_apply_none_first
1578
+ first_not_none = next(com.not_none(*values), None)
1579
+
1580
+ if first_not_none is None:
1581
+ # GH9684 - All values are None, return an empty frame.
1582
+ return self.obj._constructor()
1583
+ elif isinstance(first_not_none, DataFrame):
1584
+ return self._concat_objects(
1585
+ values,
1586
+ not_indexed_same=not_indexed_same,
1587
+ is_transform=is_transform,
1588
+ )
1589
+
1590
+ key_index = self._grouper.result_index if self.as_index else None
1591
+
1592
+ if isinstance(first_not_none, (np.ndarray, Index)):
1593
+ # GH#1738: values is list of arrays of unequal lengths
1594
+ # fall through to the outer else clause
1595
+ # TODO: sure this is right? we used to do this
1596
+ # after raising AttributeError above
1597
+ # GH 18930
1598
+ if not is_hashable(self._selection):
1599
+ # error: Need type annotation for "name"
1600
+ name = tuple(self._selection) # type: ignore[var-annotated, arg-type]
1601
+ else:
1602
+ # error: Incompatible types in assignment
1603
+ # (expression has type "Hashable", variable
1604
+ # has type "Tuple[Any, ...]")
1605
+ name = self._selection # type: ignore[assignment]
1606
+ return self.obj._constructor_sliced(values, index=key_index, name=name)
1607
+ elif not isinstance(first_not_none, Series):
1608
+ # values are not series or array-like but scalars
1609
+ # self._selection not passed through to Series as the
1610
+ # result should not take the name of original selection
1611
+ # of columns
1612
+ if self.as_index:
1613
+ return self.obj._constructor_sliced(values, index=key_index)
1614
+ else:
1615
+ result = self.obj._constructor(values, columns=[self._selection])
1616
+ result = self._insert_inaxis_grouper(result)
1617
+ return result
1618
+ else:
1619
+ # values are Series
1620
+ return self._wrap_applied_output_series(
1621
+ values,
1622
+ not_indexed_same,
1623
+ first_not_none,
1624
+ key_index,
1625
+ is_transform,
1626
+ )
1627
+
1628
+ def _wrap_applied_output_series(
1629
+ self,
1630
+ values: list[Series],
1631
+ not_indexed_same: bool,
1632
+ first_not_none,
1633
+ key_index: Index | None,
1634
+ is_transform: bool,
1635
+ ) -> DataFrame | Series:
1636
+ kwargs = first_not_none._construct_axes_dict()
1637
+ backup = Series(**kwargs)
1638
+ values = [x if (x is not None) else backup for x in values]
1639
+
1640
+ all_indexed_same = all_indexes_same(x.index for x in values)
1641
+
1642
+ if not all_indexed_same:
1643
+ # GH 8467
1644
+ return self._concat_objects(
1645
+ values,
1646
+ not_indexed_same=True,
1647
+ is_transform=is_transform,
1648
+ )
1649
+
1650
+ # Combine values
1651
+ # vstack+constructor is faster than concat and handles MI-columns
1652
+ stacked_values = np.vstack([np.asarray(v) for v in values])
1653
+
1654
+ if self.axis == 0:
1655
+ index = key_index
1656
+ columns = first_not_none.index.copy()
1657
+ if columns.name is None:
1658
+ # GH6124 - propagate name of Series when it's consistent
1659
+ names = {v.name for v in values}
1660
+ if len(names) == 1:
1661
+ columns.name = next(iter(names))
1662
+ else:
1663
+ index = first_not_none.index
1664
+ columns = key_index
1665
+ stacked_values = stacked_values.T
1666
+
1667
+ if stacked_values.dtype == object:
1668
+ # We'll have the DataFrame constructor do inference
1669
+ stacked_values = stacked_values.tolist()
1670
+ result = self.obj._constructor(stacked_values, index=index, columns=columns)
1671
+
1672
+ if not self.as_index:
1673
+ result = self._insert_inaxis_grouper(result)
1674
+
1675
+ return self._reindex_output(result)
1676
+
1677
+ def _cython_transform(
1678
+ self,
1679
+ how: str,
1680
+ numeric_only: bool = False,
1681
+ axis: AxisInt = 0,
1682
+ **kwargs,
1683
+ ) -> DataFrame:
1684
+ assert axis == 0 # handled by caller
1685
+
1686
+ # With self.axis == 0, we have multi-block tests
1687
+ # e.g. test_rank_min_int, test_cython_transform_frame
1688
+ # test_transform_numeric_ret
1689
+ # With self.axis == 1, _get_data_to_aggregate does a transpose
1690
+ # so we always have a single block.
1691
+ mgr: Manager2D = self._get_data_to_aggregate(
1692
+ numeric_only=numeric_only, name=how
1693
+ )
1694
+
1695
+ def arr_func(bvalues: ArrayLike) -> ArrayLike:
1696
+ return self._grouper._cython_operation(
1697
+ "transform", bvalues, how, 1, **kwargs
1698
+ )
1699
+
1700
+ # We could use `mgr.apply` here and not have to set_axis, but
1701
+ # we would have to do shape gymnastics for ArrayManager compat
1702
+ res_mgr = mgr.grouped_reduce(arr_func)
1703
+ res_mgr.set_axis(1, mgr.axes[1])
1704
+
1705
+ res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes)
1706
+ res_df = self._maybe_transpose_result(res_df)
1707
+ return res_df
1708
+
1709
+ def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs):
1710
+ if maybe_use_numba(engine):
1711
+ return self._transform_with_numba(
1712
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
1713
+ )
1714
+ from pandas.core.reshape.concat import concat
1715
+
1716
+ applied = []
1717
+ obj = self._obj_with_exclusions
1718
+ gen = self._grouper.get_iterator(obj, axis=self.axis)
1719
+ fast_path, slow_path = self._define_paths(func, *args, **kwargs)
1720
+
1721
+ # Determine whether to use slow or fast path by evaluating on the first group.
1722
+ # Need to handle the case of an empty generator and process the result so that
1723
+ # it does not need to be computed again.
1724
+ try:
1725
+ name, group = next(gen)
1726
+ except StopIteration:
1727
+ pass
1728
+ else:
1729
+ # 2023-02-27 No tests broken by disabling this pinning
1730
+ object.__setattr__(group, "name", name)
1731
+ try:
1732
+ path, res = self._choose_path(fast_path, slow_path, group)
1733
+ except ValueError as err:
1734
+ # e.g. test_transform_with_non_scalar_group
1735
+ msg = "transform must return a scalar value for each group"
1736
+ raise ValueError(msg) from err
1737
+ if group.size > 0:
1738
+ res = _wrap_transform_general_frame(self.obj, group, res)
1739
+ applied.append(res)
1740
+
1741
+ # Compute and process with the remaining groups
1742
+ for name, group in gen:
1743
+ if group.size == 0:
1744
+ continue
1745
+ # 2023-02-27 No tests broken by disabling this pinning
1746
+ object.__setattr__(group, "name", name)
1747
+ res = path(group)
1748
+
1749
+ res = _wrap_transform_general_frame(self.obj, group, res)
1750
+ applied.append(res)
1751
+
1752
+ concat_index = obj.columns if self.axis == 0 else obj.index
1753
+ other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1
1754
+ concatenated = concat(applied, axis=self.axis, verify_integrity=False)
1755
+ concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
1756
+ return self._set_result_index_ordered(concatenated)
1757
+
1758
+ __examples_dataframe_doc = dedent(
1759
+ """
1760
+ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
1761
+ ... 'foo', 'bar'],
1762
+ ... 'B' : ['one', 'one', 'two', 'three',
1763
+ ... 'two', 'two'],
1764
+ ... 'C' : [1, 5, 5, 2, 5, 5],
1765
+ ... 'D' : [2.0, 5., 8., 1., 2., 9.]})
1766
+ >>> grouped = df.groupby('A')[['C', 'D']]
1767
+ >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
1768
+ C D
1769
+ 0 -1.154701 -0.577350
1770
+ 1 0.577350 0.000000
1771
+ 2 0.577350 1.154701
1772
+ 3 -1.154701 -1.000000
1773
+ 4 0.577350 -0.577350
1774
+ 5 0.577350 1.000000
1775
+
1776
+ Broadcast result of the transformation
1777
+
1778
+ >>> grouped.transform(lambda x: x.max() - x.min())
1779
+ C D
1780
+ 0 4.0 6.0
1781
+ 1 3.0 8.0
1782
+ 2 4.0 6.0
1783
+ 3 3.0 8.0
1784
+ 4 4.0 6.0
1785
+ 5 3.0 8.0
1786
+
1787
+ >>> grouped.transform("mean")
1788
+ C D
1789
+ 0 3.666667 4.0
1790
+ 1 4.000000 5.0
1791
+ 2 3.666667 4.0
1792
+ 3 4.000000 5.0
1793
+ 4 3.666667 4.0
1794
+ 5 4.000000 5.0
1795
+
1796
+ .. versionchanged:: 1.3.0
1797
+
1798
+ The resulting dtype will reflect the return value of the passed ``func``,
1799
+ for example:
1800
+
1801
+ >>> grouped.transform(lambda x: x.astype(int).max())
1802
+ C D
1803
+ 0 5 8
1804
+ 1 5 9
1805
+ 2 5 8
1806
+ 3 5 9
1807
+ 4 5 8
1808
+ 5 5 9
1809
+ """
1810
+ )
1811
+
1812
+ @Substitution(klass="DataFrame", example=__examples_dataframe_doc)
1813
+ @Appender(_transform_template)
1814
+ def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
1815
+ return self._transform(
1816
+ func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
1817
+ )
1818
+
1819
+ def _define_paths(self, func, *args, **kwargs):
1820
+ if isinstance(func, str):
1821
+ fast_path = lambda group: getattr(group, func)(*args, **kwargs)
1822
+ slow_path = lambda group: group.apply(
1823
+ lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
1824
+ )
1825
+ else:
1826
+ fast_path = lambda group: func(group, *args, **kwargs)
1827
+ slow_path = lambda group: group.apply(
1828
+ lambda x: func(x, *args, **kwargs), axis=self.axis
1829
+ )
1830
+ return fast_path, slow_path
1831
+
1832
+ def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
1833
+ path = slow_path
1834
+ res = slow_path(group)
1835
+
1836
+ if self.ngroups == 1:
1837
+ # no need to evaluate multiple paths when only
1838
+ # a single group exists
1839
+ return path, res
1840
+
1841
+ # if we make it here, test if we can use the fast path
1842
+ try:
1843
+ res_fast = fast_path(group)
1844
+ except AssertionError:
1845
+ raise # pragma: no cover
1846
+ except Exception:
1847
+ # GH#29631 For user-defined function, we can't predict what may be
1848
+ # raised; see test_transform.test_transform_fastpath_raises
1849
+ return path, res
1850
+
1851
+ # verify fast path returns either:
1852
+ # a DataFrame with columns equal to group.columns
1853
+ # OR a Series with index equal to group.columns
1854
+ if isinstance(res_fast, DataFrame):
1855
+ if not res_fast.columns.equals(group.columns):
1856
+ return path, res
1857
+ elif isinstance(res_fast, Series):
1858
+ if not res_fast.index.equals(group.columns):
1859
+ return path, res
1860
+ else:
1861
+ return path, res
1862
+
1863
+ if res_fast.equals(res):
1864
+ path = fast_path
1865
+
1866
+ return path, res
1867
+
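Roughly, the fast path calls the function once on the whole group while the slow path applies it column-by-column, and the fast path is only kept for later groups when both agree. A hedged sketch with a hypothetical group:

    import pandas as pd
    group = pd.DataFrame({"C": [1, 5], "D": [2.0, 8.0]})
    fast = group.cumsum()                                 # fast path: one call on the frame
    slow = group.apply(lambda col: col.cumsum(), axis=0)  # slow path: per-column apply
    assert fast.equals(slow)  # when the results match, later groups reuse the fast path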
1868
+ def filter(self, func, dropna: bool = True, *args, **kwargs):
1869
+ """
1870
+ Filter elements from groups that don't satisfy a criterion.
1871
+
1872
+ Elements from groups are filtered if they do not satisfy the
1873
+ boolean criterion specified by func.
1874
+
1875
+ Parameters
1876
+ ----------
1877
+ func : function
1878
+ Criterion to apply to each group. Should return True or False.
1879
+ dropna : bool
1880
+ Drop groups that do not pass the filter. True by default; if False,
1881
+ groups that evaluate to False are filled with NaNs.
1882
+
1883
+ Returns
1884
+ -------
1885
+ DataFrame
1886
+
1887
+ Notes
1888
+ -----
1889
+ Each subframe is endowed with the attribute 'name' in case you need to know
1890
+ which group you are working on.
1891
+
1892
+ Functions that mutate the passed object can produce unexpected
1893
+ behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
1894
+ for more details.
1895
+
1896
+ Examples
1897
+ --------
1898
+ >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
1899
+ ... 'foo', 'bar'],
1900
+ ... 'B' : [1, 2, 3, 4, 5, 6],
1901
+ ... 'C' : [2.0, 5., 8., 1., 2., 9.]})
1902
+ >>> grouped = df.groupby('A')
1903
+ >>> grouped.filter(lambda x: x['B'].mean() > 3.)
1904
+ A B C
1905
+ 1 bar 2 5.0
1906
+ 3 bar 4 1.0
1907
+ 5 bar 6 9.0
1908
+ """
1909
+ indices = []
1910
+
1911
+ obj = self._selected_obj
1912
+ gen = self._grouper.get_iterator(obj, axis=self.axis)
1913
+
1914
+ for name, group in gen:
1915
+ # 2023-02-27 no tests are broken by this pinning, but it is documented in the
1916
+ # docstring above.
1917
+ object.__setattr__(group, "name", name)
1918
+
1919
+ res = func(group, *args, **kwargs)
1920
+
1921
+ try:
1922
+ res = res.squeeze()
1923
+ except AttributeError: # allow e.g., scalars and frames to pass
1924
+ pass
1925
+
1926
+ # interpret the result of the filter
1927
+ if is_bool(res) or (is_scalar(res) and isna(res)):
1928
+ if notna(res) and res:
1929
+ indices.append(self._get_index(name))
1930
+ else:
1931
+ # non scalars aren't allowed
1932
+ raise TypeError(
1933
+ f"filter function returned a {type(res).__name__}, "
1934
+ "but expected a scalar bool"
1935
+ )
1936
+
1937
+ return self._apply_filter(indices, dropna)
1938
+
1939
+ def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:
1940
+ if self.axis == 1:
1941
+ # GH 37725
1942
+ raise ValueError("Cannot subset columns when using axis=1")
1943
+ # per GH 23566
1944
+ if isinstance(key, tuple) and len(key) > 1:
1945
+ # if len == 1, then it becomes a SeriesGroupBy and this is actually
1946
+ # valid syntax, so don't raise
1947
+ raise ValueError(
1948
+ "Cannot subset columns with a tuple with more than one element. "
1949
+ "Use a list instead."
1950
+ )
1951
+ return super().__getitem__(key)
1952
+
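A brief sketch of the column-selection rule enforced above (hypothetical frame): list keys are allowed, tuple keys with more than one element are rejected.

    import pandas as pd
    df = pd.DataFrame({"A": ["x", "y"], "B": [1, 2], "C": [3.0, 4.0]})
    gb = df.groupby("A")
    gb[["B", "C"]]   # OK: a list selects multiple columns (DataFrameGroupBy)
    gb["B"]          # OK: a single label selects one column (SeriesGroupBy)
    # gb["B", "C"]   # raises ValueError: use a list instead of a tuple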
1953
+ def _gotitem(self, key, ndim: int, subset=None):
1954
+ """
1955
+ sub-classes to define
1956
+ return a sliced object
1957
+
1958
+ Parameters
1959
+ ----------
1960
+ key : string / list of selections
1961
+ ndim : {1, 2}
1962
+ requested ndim of result
1963
+ subset : object, default None
1964
+ subset to act on
1965
+ """
1966
+ if ndim == 2:
1967
+ if subset is None:
1968
+ subset = self.obj
1969
+ return DataFrameGroupBy(
1970
+ subset,
1971
+ self.keys,
1972
+ axis=self.axis,
1973
+ level=self.level,
1974
+ grouper=self._grouper,
1975
+ exclusions=self.exclusions,
1976
+ selection=key,
1977
+ as_index=self.as_index,
1978
+ sort=self.sort,
1979
+ group_keys=self.group_keys,
1980
+ observed=self.observed,
1981
+ dropna=self.dropna,
1982
+ )
1983
+ elif ndim == 1:
1984
+ if subset is None:
1985
+ subset = self.obj[key]
1986
+ return SeriesGroupBy(
1987
+ subset,
1988
+ self.keys,
1989
+ level=self.level,
1990
+ grouper=self._grouper,
1991
+ exclusions=self.exclusions,
1992
+ selection=key,
1993
+ as_index=self.as_index,
1994
+ sort=self.sort,
1995
+ group_keys=self.group_keys,
1996
+ observed=self.observed,
1997
+ dropna=self.dropna,
1998
+ )
1999
+
2000
+ raise AssertionError("invalid ndim for _gotitem")
2001
+
2002
+ def _get_data_to_aggregate(
2003
+ self, *, numeric_only: bool = False, name: str | None = None
2004
+ ) -> Manager2D:
2005
+ obj = self._obj_with_exclusions
2006
+ if self.axis == 1:
2007
+ mgr = obj.T._mgr
2008
+ else:
2009
+ mgr = obj._mgr
2010
+
2011
+ if numeric_only:
2012
+ mgr = mgr.get_numeric_data()
2013
+ return mgr
2014
+
2015
+ def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
2016
+ return self.obj._constructor_from_mgr(mgr, axes=mgr.axes)
2017
+
2018
+ def _apply_to_column_groupbys(self, func) -> DataFrame:
2019
+ from pandas.core.reshape.concat import concat
2020
+
2021
+ obj = self._obj_with_exclusions
2022
+ columns = obj.columns
2023
+ sgbs = [
2024
+ SeriesGroupBy(
2025
+ obj.iloc[:, i],
2026
+ selection=colname,
2027
+ grouper=self._grouper,
2028
+ exclusions=self.exclusions,
2029
+ observed=self.observed,
2030
+ )
2031
+ for i, colname in enumerate(obj.columns)
2032
+ ]
2033
+ results = [func(sgb) for sgb in sgbs]
2034
+
2035
+ if not len(results):
2036
+ # concat would raise
2037
+ res_df = DataFrame([], columns=columns, index=self._grouper.result_index)
2038
+ else:
2039
+ res_df = concat(results, keys=columns, axis=1)
2040
+
2041
+ if not self.as_index:
2042
+ res_df.index = default_index(len(res_df))
2043
+ res_df = self._insert_inaxis_grouper(res_df)
2044
+ return res_df
2045
+
2046
+ def nunique(self, dropna: bool = True) -> DataFrame:
2047
+ """
2048
+ Return DataFrame with counts of unique elements in each position.
2049
+
2050
+ Parameters
2051
+ ----------
2052
+ dropna : bool, default True
2053
+ Don't include NaN in the counts.
2054
+
2055
+ Returns
2056
+ -------
2057
+ nunique : DataFrame
2058
+
2059
+ Examples
2060
+ --------
2061
+ >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
2062
+ ... 'ham', 'ham'],
2063
+ ... 'value1': [1, 5, 5, 2, 5, 5],
2064
+ ... 'value2': list('abbaxy')})
2065
+ >>> df
2066
+ id value1 value2
2067
+ 0 spam 1 a
2068
+ 1 egg 5 b
2069
+ 2 egg 5 b
2070
+ 3 spam 2 a
2071
+ 4 ham 5 x
2072
+ 5 ham 5 y
2073
+
2074
+ >>> df.groupby('id').nunique()
2075
+ value1 value2
2076
+ id
2077
+ egg 1 1
2078
+ ham 1 2
2079
+ spam 2 1
2080
+
2081
+ Check for rows with the same id but conflicting values:
2082
+
2083
+ >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
2084
+ id value1 value2
2085
+ 0 spam 1 a
2086
+ 3 spam 2 a
2087
+ 4 ham 5 x
2088
+ 5 ham 5 y
2089
+ """
2090
+
2091
+ if self.axis != 0:
2092
+ # see test_groupby_crash_on_nunique
2093
+ return self._python_apply_general(
2094
+ lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True
2095
+ )
2096
+
2097
+ return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna))
2098
+
2099
+ def idxmax(
2100
+ self,
2101
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2102
+ skipna: bool = True,
2103
+ numeric_only: bool = False,
2104
+ ) -> DataFrame:
2105
+ """
2106
+ Return index of first occurrence of maximum over requested axis.
2107
+
2108
+ NA/null values are excluded.
2109
+
2110
+ Parameters
2111
+ ----------
2112
+ axis : {{0 or 'index', 1 or 'columns'}}, default None
2113
+ The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
2114
+ If axis is not provided, grouper's axis is used.
2115
+
2116
+ .. versionchanged:: 2.0.0
2117
+
2118
+ .. deprecated:: 2.1.0
2119
+ For axis=1, operate on the underlying object instead. Otherwise
2120
+ the axis keyword is not necessary.
2121
+
2122
+ skipna : bool, default True
2123
+ Exclude NA/null values. If an entire row/column is NA, the result
2124
+ will be NA.
2125
+ numeric_only : bool, default False
2126
+ Include only `float`, `int` or `boolean` data.
2127
+
2128
+ .. versionadded:: 1.5.0
2129
+
2130
+ Returns
2131
+ -------
2132
+ Series
2133
+ Indexes of maxima along the specified axis.
2134
+
2135
+ Raises
2136
+ ------
2137
+ ValueError
2138
+ * If the row/column is empty
2139
+
2140
+ See Also
2141
+ --------
2142
+ Series.idxmax : Return index of the maximum element.
2143
+
2144
+ Notes
2145
+ -----
2146
+ This method is the DataFrame version of ``ndarray.argmax``.
2147
+
2148
+ Examples
2149
+ --------
2150
+ Consider a dataset containing food consumption in Argentina.
2151
+
2152
+ >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
2153
+ ... 'co2_emissions': [37.2, 19.66, 1712]},
2154
+ ... index=['Pork', 'Wheat Products', 'Beef'])
2155
+
2156
+ >>> df
2157
+ consumption co2_emissions
2158
+ Pork 10.51 37.20
2159
+ Wheat Products 103.11 19.66
2160
+ Beef 55.48 1712.00
2161
+
2162
+ By default, it returns the index for the maximum value in each column.
2163
+
2164
+ >>> df.idxmax()
2165
+ consumption Wheat Products
2166
+ co2_emissions Beef
2167
+ dtype: object
2168
+
2169
+ To return the index for the maximum value in each row, use ``axis="columns"``.
2170
+
2171
+ >>> df.idxmax(axis="columns")
2172
+ Pork co2_emissions
2173
+ Wheat Products consumption
2174
+ Beef co2_emissions
2175
+ dtype: object
2176
+ """
2177
+ return self._idxmax_idxmin(
2178
+ "idxmax", axis=axis, numeric_only=numeric_only, skipna=skipna
2179
+ )
2180
+
2181
+ def idxmin(
2182
+ self,
2183
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2184
+ skipna: bool = True,
2185
+ numeric_only: bool = False,
2186
+ ) -> DataFrame:
2187
+ """
2188
+ Return index of first occurrence of minimum over requested axis.
2189
+
2190
+ NA/null values are excluded.
2191
+
2192
+ Parameters
2193
+ ----------
2194
+ axis : {{0 or 'index', 1 or 'columns'}}, default None
2195
+ The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
2196
+ If axis is not provided, grouper's axis is used.
2197
+
2198
+ .. versionchanged:: 2.0.0
2199
+
2200
+ .. deprecated:: 2.1.0
2201
+ For axis=1, operate on the underlying object instead. Otherwise
2202
+ the axis keyword is not necessary.
2203
+
2204
+ skipna : bool, default True
2205
+ Exclude NA/null values. If an entire row/column is NA, the result
2206
+ will be NA.
2207
+ numeric_only : bool, default False
2208
+ Include only `float`, `int` or `boolean` data.
2209
+
2210
+ .. versionadded:: 1.5.0
2211
+
2212
+ Returns
2213
+ -------
2214
+ Series
2215
+ Indexes of minima along the specified axis.
2216
+
2217
+ Raises
2218
+ ------
2219
+ ValueError
2220
+ * If the row/column is empty
2221
+
2222
+ See Also
2223
+ --------
2224
+ Series.idxmin : Return index of the minimum element.
2225
+
2226
+ Notes
2227
+ -----
2228
+ This method is the DataFrame version of ``ndarray.argmin``.
2229
+
2230
+ Examples
2231
+ --------
2232
+ Consider a dataset containing food consumption in Argentina.
2233
+
2234
+ >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
2235
+ ... 'co2_emissions': [37.2, 19.66, 1712]},
2236
+ ... index=['Pork', 'Wheat Products', 'Beef'])
2237
+
2238
+ >>> df
2239
+ consumption co2_emissions
2240
+ Pork 10.51 37.20
2241
+ Wheat Products 103.11 19.66
2242
+ Beef 55.48 1712.00
2243
+
2244
+ By default, it returns the index for the minimum value in each column.
2245
+
2246
+ >>> df.idxmin()
2247
+ consumption Pork
2248
+ co2_emissions Wheat Products
2249
+ dtype: object
2250
+
2251
+ To return the index for the minimum value in each row, use ``axis="columns"``.
2252
+
2253
+ >>> df.idxmin(axis="columns")
2254
+ Pork consumption
2255
+ Wheat Products co2_emissions
2256
+ Beef consumption
2257
+ dtype: object
2258
+ """
2259
+ return self._idxmax_idxmin(
2260
+ "idxmin", axis=axis, numeric_only=numeric_only, skipna=skipna
2261
+ )
2262
+
2263
+ boxplot = boxplot_frame_groupby
2264
+
2265
+ def value_counts(
2266
+ self,
2267
+ subset: Sequence[Hashable] | None = None,
2268
+ normalize: bool = False,
2269
+ sort: bool = True,
2270
+ ascending: bool = False,
2271
+ dropna: bool = True,
2272
+ ) -> DataFrame | Series:
2273
+ """
2274
+ Return a Series or DataFrame containing counts of unique rows.
2275
+
2276
+ .. versionadded:: 1.4.0
2277
+
2278
+ Parameters
2279
+ ----------
2280
+ subset : list-like, optional
2281
+ Columns to use when counting unique combinations.
2282
+ normalize : bool, default False
2283
+ Return proportions rather than frequencies.
2284
+ sort : bool, default True
2285
+ Sort by frequencies.
2286
+ ascending : bool, default False
2287
+ Sort in ascending order.
2288
+ dropna : bool, default True
2289
+ Don't include counts of rows that contain NA values.
2290
+
2291
+ Returns
2292
+ -------
2293
+ Series or DataFrame
2294
+ Series if the groupby as_index is True, otherwise DataFrame.
2295
+
2296
+ See Also
2297
+ --------
2298
+ Series.value_counts: Equivalent method on Series.
2299
+ DataFrame.value_counts: Equivalent method on DataFrame.
2300
+ SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.
2301
+
2302
+ Notes
2303
+ -----
2304
+ - If the groupby as_index is True then the returned Series will have a
2305
+ MultiIndex with one level per input column.
2306
+ - If the groupby as_index is False then the returned DataFrame will have an
2307
+ additional column with the value_counts. The column is labelled 'count' or
2308
+ 'proportion', depending on the ``normalize`` parameter.
2309
+
2310
+ By default, rows that contain any NA values are omitted from
2311
+ the result.
2312
+
2313
+ By default, the result will be in descending order so that the
2314
+ first element of each group is the most frequently-occurring row.
2315
+
2316
+ Examples
2317
+ --------
2318
+ >>> df = pd.DataFrame({
2319
+ ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
2320
+ ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
2321
+ ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']
2322
+ ... })
2323
+
2324
+ >>> df
2325
+ gender education country
2326
+ 0 male low US
2327
+ 1 male medium FR
2328
+ 2 female high US
2329
+ 3 male low FR
2330
+ 4 female high FR
2331
+ 5 male low FR
2332
+
2333
+ >>> df.groupby('gender').value_counts()
2334
+ gender education country
2335
+ female high FR 1
2336
+ US 1
2337
+ male low FR 2
2338
+ US 1
2339
+ medium FR 1
2340
+ Name: count, dtype: int64
2341
+
2342
+ >>> df.groupby('gender').value_counts(ascending=True)
2343
+ gender education country
2344
+ female high FR 1
2345
+ US 1
2346
+ male low US 1
2347
+ medium FR 1
2348
+ low FR 2
2349
+ Name: count, dtype: int64
2350
+
2351
+ >>> df.groupby('gender').value_counts(normalize=True)
2352
+ gender education country
2353
+ female high FR 0.50
2354
+ US 0.50
2355
+ male low FR 0.50
2356
+ US 0.25
2357
+ medium FR 0.25
2358
+ Name: proportion, dtype: float64
2359
+
2360
+ >>> df.groupby('gender', as_index=False).value_counts()
2361
+ gender education country count
2362
+ 0 female high FR 1
2363
+ 1 female high US 1
2364
+ 2 male low FR 2
2365
+ 3 male low US 1
2366
+ 4 male medium FR 1
2367
+
2368
+ >>> df.groupby('gender', as_index=False).value_counts(normalize=True)
2369
+ gender education country proportion
2370
+ 0 female high FR 0.50
2371
+ 1 female high US 0.50
2372
+ 2 male low FR 0.50
2373
+ 3 male low US 0.25
2374
+ 4 male medium FR 0.25
2375
+ """
2376
+ return self._value_counts(subset, normalize, sort, ascending, dropna)
2377
+
2378
+ def fillna(
2379
+ self,
2380
+ value: Hashable | Mapping | Series | DataFrame | None = None,
2381
+ method: FillnaOptions | None = None,
2382
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2383
+ inplace: bool = False,
2384
+ limit: int | None = None,
2385
+ downcast=lib.no_default,
2386
+ ) -> DataFrame | None:
2387
+ """
2388
+ Fill NA/NaN values using the specified method within groups.
2389
+
2390
+ .. deprecated:: 2.2.0
2391
+ This method is deprecated and will be removed in a future version.
2392
+ Use the :meth:`.DataFrameGroupBy.ffill` or :meth:`.DataFrameGroupBy.bfill`
2393
+ for forward or backward filling instead. If you want to fill with a
2394
+ single value, use :meth:`DataFrame.fillna` instead.
2395
+
2396
+ Parameters
2397
+ ----------
2398
+ value : scalar, dict, Series, or DataFrame
2399
+ Value to use to fill holes (e.g. 0), alternately a
2400
+ dict/Series/DataFrame of values specifying which value to use for
2401
+ each index (for a Series) or column (for a DataFrame). Values not
2402
+ in the dict/Series/DataFrame will not be filled. This value cannot
2403
+ be a list. Users wanting to use the ``value`` argument and not ``method``
2404
+ should prefer :meth:`.DataFrame.fillna` as this
2405
+ will produce the same result and be more performant.
2406
+ method : {{'bfill', 'ffill', None}}, default None
2407
+ Method to use for filling holes. ``'ffill'`` will propagate
2408
+ the last valid observation forward within a group.
2409
+ ``'bfill'`` will use next valid observation to fill the gap.
2410
+ axis : {0 or 'index', 1 or 'columns'}
2411
+ Axis along which to fill missing values. When the :class:`DataFrameGroupBy`
2412
+ ``axis`` argument is ``0``, using ``axis=1`` here will produce
2413
+ the same results as :meth:`.DataFrame.fillna`. When the
2414
+ :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0``
2415
+ or ``axis=1`` here will produce the same results.
2416
+ inplace : bool, default False
2417
+ Broken. Do not set to True.
2418
+ limit : int, default None
2419
+ If method is specified, this is the maximum number of consecutive
2420
+ NaN values to forward/backward fill within a group. In other words,
2421
+ if there is a gap with more than this number of consecutive NaNs,
2422
+ it will only be partially filled. If method is not specified, this is the
2423
+ maximum number of entries along the entire axis where NaNs will be
2424
+ filled. Must be greater than 0 if not None.
2425
+ downcast : dict, default is None
2426
+ A dict of item->dtype of what to downcast if possible,
2427
+ or the string 'infer' which will try to downcast to an appropriate
2428
+ equal type (e.g. float64 to int64 if possible).
2429
+
2430
+ Returns
2431
+ -------
2432
+ DataFrame
2433
+ Object with missing values filled.
2434
+
2435
+ See Also
2436
+ --------
2437
+ ffill : Forward fill values within a group.
2438
+ bfill : Backward fill values within a group.
2439
+
2440
+ Examples
2441
+ --------
2442
+ >>> df = pd.DataFrame(
2443
+ ... {
2444
+ ... "key": [0, 0, 1, 1, 1],
2445
+ ... "A": [np.nan, 2, np.nan, 3, np.nan],
2446
+ ... "B": [2, 3, np.nan, np.nan, np.nan],
2447
+ ... "C": [np.nan, np.nan, 2, np.nan, np.nan],
2448
+ ... }
2449
+ ... )
2450
+ >>> df
2451
+ key A B C
2452
+ 0 0 NaN 2.0 NaN
2453
+ 1 0 2.0 3.0 NaN
2454
+ 2 1 NaN NaN 2.0
2455
+ 3 1 3.0 NaN NaN
2456
+ 4 1 NaN NaN NaN
2457
+
2458
+ Propagate non-null values forward or backward within each group along columns.
2459
+
2460
+ >>> df.groupby("key").fillna(method="ffill")
2461
+ A B C
2462
+ 0 NaN 2.0 NaN
2463
+ 1 2.0 3.0 NaN
2464
+ 2 NaN NaN 2.0
2465
+ 3 3.0 NaN 2.0
2466
+ 4 3.0 NaN 2.0
2467
+
2468
+ >>> df.groupby("key").fillna(method="bfill")
2469
+ A B C
2470
+ 0 2.0 2.0 NaN
2471
+ 1 2.0 3.0 NaN
2472
+ 2 3.0 NaN 2.0
2473
+ 3 3.0 NaN NaN
2474
+ 4 NaN NaN NaN
2475
+
2476
+ Propagate non-null values forward or backward within each group along rows.
2477
+
2478
+ >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="ffill").T
2479
+ key A B C
2480
+ 0 0.0 0.0 2.0 2.0
2481
+ 1 0.0 2.0 3.0 3.0
2482
+ 2 1.0 1.0 NaN 2.0
2483
+ 3 1.0 3.0 NaN NaN
2484
+ 4 1.0 1.0 NaN NaN
2485
+
2486
+ >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="bfill").T
2487
+ key A B C
2488
+ 0 0.0 NaN 2.0 NaN
2489
+ 1 0.0 2.0 3.0 NaN
2490
+ 2 1.0 NaN 2.0 2.0
2491
+ 3 1.0 3.0 NaN NaN
2492
+ 4 1.0 NaN NaN NaN
2493
+
2494
+ Only replace the first NaN element within a group along rows.
2495
+
2496
+ >>> df.groupby("key").fillna(method="ffill", limit=1)
2497
+ A B C
2498
+ 0 NaN 2.0 NaN
2499
+ 1 2.0 3.0 NaN
2500
+ 2 NaN NaN 2.0
2501
+ 3 3.0 NaN 2.0
2502
+ 4 3.0 NaN NaN
2503
+ """
2504
+ warnings.warn(
2505
+ f"{type(self).__name__}.fillna is deprecated and "
2506
+ "will be removed in a future version. Use obj.ffill() or obj.bfill() "
2507
+ "for forward or backward filling instead. If you want to fill with a "
2508
+ f"single value, use {type(self.obj).__name__}.fillna instead",
2509
+ FutureWarning,
2510
+ stacklevel=find_stack_level(),
2511
+ )
2512
+
2513
+ result = self._op_via_apply(
2514
+ "fillna",
2515
+ value=value,
2516
+ method=method,
2517
+ axis=axis,
2518
+ inplace=inplace,
2519
+ limit=limit,
2520
+ downcast=downcast,
2521
+ )
2522
+ return result
2523
+
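(Editor's note, not part of the diff: the deprecated ``fillna`` above points users at ``ffill``/``bfill``. A minimal migration sketch, reusing the ``df`` from the docstring example, could look like this.)

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(
        {
            "key": [0, 0, 1, 1, 1],
            "A": [np.nan, 2, np.nan, 3, np.nan],
            "B": [2, 3, np.nan, np.nan, np.nan],
            "C": [np.nan, np.nan, 2, np.nan, np.nan],
        }
    )

    # Deprecated: df.groupby("key").fillna(method="ffill")
    ffilled = df.groupby("key").ffill()   # forward fill within each group

    # Deprecated: df.groupby("key").fillna(method="bfill")
    bfilled = df.groupby("key").bfill()   # backward fill within each group

    # Filling with a single value should go through DataFrame.fillna directly.
    filled = df.fillna(0)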
2524
+ def take(
2525
+ self,
2526
+ indices: TakeIndexer,
2527
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2528
+ **kwargs,
2529
+ ) -> DataFrame:
2530
+ """
2531
+ Return the elements in the given *positional* indices in each group.
2532
+
2533
+ This means that we are not indexing according to actual values in
2534
+ the index attribute of the object. We are indexing according to the
2535
+ actual position of the element in the object.
2536
+
2537
+ If a requested index does not exist for some group, this method will raise.
2538
+ To get similar behavior that ignores indices that don't exist, see
2539
+ :meth:`.DataFrameGroupBy.nth`.
2540
+
2541
+ Parameters
2542
+ ----------
2543
+ indices : array-like
2544
+ An array of ints indicating which positions to take.
2545
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
2546
+ The axis on which to select elements. ``0`` means that we are
2547
+ selecting rows, ``1`` means that we are selecting columns.
2548
+
2549
+ .. deprecated:: 2.1.0
2550
+ For axis=1, operate on the underlying object instead. Otherwise
2551
+ the axis keyword is not necessary.
2552
+
2553
+ **kwargs
2554
+ For compatibility with :meth:`numpy.take`. Has no effect on the
2555
+ output.
2556
+
2557
+ Returns
2558
+ -------
2559
+ DataFrame
2560
+ A DataFrame containing the elements taken from each group.
2561
+
2562
+ See Also
2563
+ --------
2564
+ DataFrame.take : Take elements from a DataFrame along an axis.
2565
+ DataFrame.loc : Select a subset of a DataFrame by labels.
2566
+ DataFrame.iloc : Select a subset of a DataFrame by positions.
2567
+ numpy.take : Take elements from an array along an axis.
2568
+
2569
+ Examples
2570
+ --------
2571
+ >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
2572
+ ... ('parrot', 'bird', 24.0),
2573
+ ... ('lion', 'mammal', 80.5),
2574
+ ... ('monkey', 'mammal', np.nan),
2575
+ ... ('rabbit', 'mammal', 15.0)],
2576
+ ... columns=['name', 'class', 'max_speed'],
2577
+ ... index=[4, 3, 2, 1, 0])
2578
+ >>> df
2579
+ name class max_speed
2580
+ 4 falcon bird 389.0
2581
+ 3 parrot bird 24.0
2582
+ 2 lion mammal 80.5
2583
+ 1 monkey mammal NaN
2584
+ 0 rabbit mammal 15.0
2585
+ >>> gb = df.groupby([1, 1, 2, 2, 2])
2586
+
2587
+ Take elements at positions 0 and 1 along the axis 0 (default).
2588
+
2589
+ Note how the indices selected in the result do not correspond to
2590
+ our input indices 0 and 1. That's because we are selecting the 0th
2591
+ and 1st rows, not rows whose indices equal 0 and 1.
2592
+
2593
+ >>> gb.take([0, 1])
2594
+ name class max_speed
2595
+ 1 4 falcon bird 389.0
2596
+ 3 parrot bird 24.0
2597
+ 2 2 lion mammal 80.5
2598
+ 1 monkey mammal NaN
2599
+
2600
+ The order of the specified indices influences the order in the result.
2601
+ Here, the order is swapped from the previous example.
2602
+
2603
+ >>> gb.take([1, 0])
2604
+ name class max_speed
2605
+ 1 3 parrot bird 24.0
2606
+ 4 falcon bird 389.0
2607
+ 2 1 monkey mammal NaN
2608
+ 2 lion mammal 80.5
2609
+
2610
+ We may also take elements using negative integers, which count from the
+ end of each group, just like with Python lists.
2614
+
2615
+ >>> gb.take([-1, -2])
2616
+ name class max_speed
2617
+ 1 3 parrot bird 24.0
2618
+ 4 falcon bird 389.0
2619
+ 2 0 rabbit mammal 15.0
2620
+ 1 monkey mammal NaN
2621
+ """
2622
+ result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs)
2623
+ return result
2624
+
2625
+ def skew(
2626
+ self,
2627
+ axis: Axis | None | lib.NoDefault = lib.no_default,
2628
+ skipna: bool = True,
2629
+ numeric_only: bool = False,
2630
+ **kwargs,
2631
+ ) -> DataFrame:
2632
+ """
2633
+ Return unbiased skew within groups.
2634
+
2635
+ Normalized by N-1.
2636
+
2637
+ Parameters
2638
+ ----------
2639
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
2640
+ Axis for the function to be applied on.
2641
+
2642
+ Specifying ``axis=None`` will apply the aggregation across both axes.
2643
+
2644
+ .. versionadded:: 2.0.0
2645
+
2646
+ .. deprecated:: 2.1.0
2647
+ For axis=1, operate on the underlying object instead. Otherwise
2648
+ the axis keyword is not necessary.
2649
+
2650
+ skipna : bool, default True
2651
+ Exclude NA/null values when computing the result.
2652
+
2653
+ numeric_only : bool, default False
2654
+ Include only float, int, boolean columns.
2655
+
2656
+ **kwargs
2657
+ Additional keyword arguments to be passed to the function.
2658
+
2659
+ Returns
2660
+ -------
2661
+ DataFrame
2662
+
2663
+ See Also
2664
+ --------
2665
+ DataFrame.skew : Return unbiased skew over requested axis.
2666
+
2667
+ Examples
2668
+ --------
2669
+ >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi',
2670
+ ... 'lion', 'monkey', 'rabbit'],
2671
+ ... ['bird', 'bird', 'bird', 'bird',
2672
+ ... 'mammal', 'mammal', 'mammal']]
2673
+ >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class'))
2674
+ >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan,
2675
+ ... 80.5, 21.5, 15.0]},
2676
+ ... index=index)
2677
+ >>> df
2678
+ max_speed
2679
+ name class
2680
+ falcon bird 389.0
2681
+ parrot bird 24.0
2682
+ cockatoo bird 70.0
2683
+ kiwi bird NaN
2684
+ lion mammal 80.5
2685
+ monkey mammal 21.5
2686
+ rabbit mammal 15.0
2687
+ >>> gb = df.groupby(["class"])
2688
+ >>> gb.skew()
2689
+ max_speed
2690
+ class
2691
+ bird 1.628296
2692
+ mammal 1.669046
2693
+ >>> gb.skew(skipna=False)
2694
+ max_speed
2695
+ class
2696
+ bird NaN
2697
+ mammal 1.669046
2698
+ """
2699
+ if axis is lib.no_default:
2700
+ axis = 0
2701
+
2702
+ if axis != 0:
2703
+ result = self._op_via_apply(
2704
+ "skew",
2705
+ axis=axis,
2706
+ skipna=skipna,
2707
+ numeric_only=numeric_only,
2708
+ **kwargs,
2709
+ )
2710
+ return result
2711
+
2712
+ def alt(obj):
2713
+ # This should not be reached since the cython path should raise
2714
+ # TypeError and not NotImplementedError.
2715
+ raise TypeError(f"'skew' is not supported for dtype={obj.dtype}")
2716
+
2717
+ return self._cython_agg_general(
2718
+ "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
2719
+ )
2720
+
2721
+ @property
2722
+ @doc(DataFrame.plot.__doc__)
2723
+ def plot(self) -> GroupByPlot:
2724
+ result = GroupByPlot(self)
2725
+ return result
2726
+
2727
+ @doc(DataFrame.corr.__doc__)
2728
+ def corr(
2729
+ self,
2730
+ method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
2731
+ min_periods: int = 1,
2732
+ numeric_only: bool = False,
2733
+ ) -> DataFrame:
2734
+ result = self._op_via_apply(
2735
+ "corr", method=method, min_periods=min_periods, numeric_only=numeric_only
2736
+ )
2737
+ return result
2738
+
2739
+ @doc(DataFrame.cov.__doc__)
2740
+ def cov(
2741
+ self,
2742
+ min_periods: int | None = None,
2743
+ ddof: int | None = 1,
2744
+ numeric_only: bool = False,
2745
+ ) -> DataFrame:
2746
+ result = self._op_via_apply(
2747
+ "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only
2748
+ )
2749
+ return result
2750
+
2751
+ @doc(DataFrame.hist.__doc__)
2752
+ def hist(
2753
+ self,
2754
+ column: IndexLabel | None = None,
2755
+ by=None,
2756
+ grid: bool = True,
2757
+ xlabelsize: int | None = None,
2758
+ xrot: float | None = None,
2759
+ ylabelsize: int | None = None,
2760
+ yrot: float | None = None,
2761
+ ax=None,
2762
+ sharex: bool = False,
2763
+ sharey: bool = False,
2764
+ figsize: tuple[int, int] | None = None,
2765
+ layout: tuple[int, int] | None = None,
2766
+ bins: int | Sequence[int] = 10,
2767
+ backend: str | None = None,
2768
+ legend: bool = False,
2769
+ **kwargs,
2770
+ ):
2771
+ result = self._op_via_apply(
2772
+ "hist",
2773
+ column=column,
2774
+ by=by,
2775
+ grid=grid,
2776
+ xlabelsize=xlabelsize,
2777
+ xrot=xrot,
2778
+ ylabelsize=ylabelsize,
2779
+ yrot=yrot,
2780
+ ax=ax,
2781
+ sharex=sharex,
2782
+ sharey=sharey,
2783
+ figsize=figsize,
2784
+ layout=layout,
2785
+ bins=bins,
2786
+ backend=backend,
2787
+ legend=legend,
2788
+ **kwargs,
2789
+ )
2790
+ return result
2791
+
2792
+ @property
2793
+ @doc(DataFrame.dtypes.__doc__)
2794
+ def dtypes(self) -> Series:
2795
+ # GH#51045
2796
+ warnings.warn(
2797
+ f"{type(self).__name__}.dtypes is deprecated and will be removed in "
2798
+ "a future version. Check the dtypes on the base object instead",
2799
+ FutureWarning,
2800
+ stacklevel=find_stack_level(),
2801
+ )
2802
+
2803
+ # error: Incompatible return value type (got "DataFrame", expected "Series")
2804
+ return self._python_apply_general( # type: ignore[return-value]
2805
+ lambda df: df.dtypes, self._selected_obj
2806
+ )
2807
+
2808
+ @doc(DataFrame.corrwith.__doc__)
2809
+ def corrwith(
2810
+ self,
2811
+ other: DataFrame | Series,
2812
+ axis: Axis | lib.NoDefault = lib.no_default,
2813
+ drop: bool = False,
2814
+ method: CorrelationMethod = "pearson",
2815
+ numeric_only: bool = False,
2816
+ ) -> DataFrame:
2817
+ result = self._op_via_apply(
2818
+ "corrwith",
2819
+ other=other,
2820
+ axis=axis,
2821
+ drop=drop,
2822
+ method=method,
2823
+ numeric_only=numeric_only,
2824
+ )
2825
+ return result
2826
+
2827
+
2828
+ def _wrap_transform_general_frame(
2829
+ obj: DataFrame, group: DataFrame, res: DataFrame | Series
2830
+ ) -> DataFrame:
2831
+ from pandas import concat
2832
+
2833
+ if isinstance(res, Series):
2834
+ # we need to broadcast across the
2835
+ # other dimension; this will preserve dtypes
2836
+ # GH14457
2837
+ if res.index.is_(obj.index):
2838
+ res_frame = concat([res] * len(group.columns), axis=1)
2839
+ res_frame.columns = group.columns
2840
+ res_frame.index = group.index
2841
+ else:
2842
+ res_frame = obj._constructor(
2843
+ np.tile(res.values, (len(group.index), 1)),
2844
+ columns=group.columns,
2845
+ index=group.index,
2846
+ )
2847
+ assert isinstance(res_frame, DataFrame)
2848
+ return res_frame
2849
+ elif isinstance(res, DataFrame) and not res.index.is_(group.index):
2850
+ return res._align_frame(group)[0]
2851
+ else:
2852
+ return res
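(Editor's note, not part of the diff: ``_wrap_transform_general_frame`` above handles user-defined ``transform`` functions whose per-group result is a Series rather than a frame. A hedged sketch of the broadcast branch, assuming a plain column-means lambda, is shown below; the per-group Series is tiled back to each group's shape.)

    import pandas as pd

    df = pd.DataFrame(
        {"key": ["a", "a", "b"], "x": [1.0, 3.0, 5.0], "y": [2.0, 4.0, 6.0]}
    )

    # The lambda returns a Series of column means per group, so the result is
    # broadcast (tiled) back across each group's rows, preserving df's index.
    out = df.groupby("key").transform(lambda g: g.mean())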
venv/lib/python3.10/site-packages/pandas/core/groupby/groupby.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pandas/core/groupby/grouper.py ADDED
@@ -0,0 +1,1102 @@
1
+ """
2
+ Provide user facing operators for doing the split part of the
3
+ split-apply-combine paradigm.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ final,
10
+ )
11
+ import warnings
12
+
13
+ import numpy as np
14
+
15
+ from pandas._config import (
16
+ using_copy_on_write,
17
+ warn_copy_on_write,
18
+ )
19
+
20
+ from pandas._libs import lib
21
+ from pandas._libs.tslibs import OutOfBoundsDatetime
22
+ from pandas.errors import InvalidIndexError
23
+ from pandas.util._decorators import cache_readonly
24
+ from pandas.util._exceptions import find_stack_level
25
+
26
+ from pandas.core.dtypes.common import (
27
+ is_list_like,
28
+ is_scalar,
29
+ )
30
+ from pandas.core.dtypes.dtypes import CategoricalDtype
31
+
32
+ from pandas.core import algorithms
33
+ from pandas.core.arrays import (
34
+ Categorical,
35
+ ExtensionArray,
36
+ )
37
+ import pandas.core.common as com
38
+ from pandas.core.frame import DataFrame
39
+ from pandas.core.groupby import ops
40
+ from pandas.core.groupby.categorical import recode_for_groupby
41
+ from pandas.core.indexes.api import (
42
+ CategoricalIndex,
43
+ Index,
44
+ MultiIndex,
45
+ )
46
+ from pandas.core.series import Series
47
+
48
+ from pandas.io.formats.printing import pprint_thing
49
+
50
+ if TYPE_CHECKING:
51
+ from collections.abc import (
52
+ Hashable,
53
+ Iterator,
54
+ )
55
+
56
+ from pandas._typing import (
57
+ ArrayLike,
58
+ Axis,
59
+ NDFrameT,
60
+ npt,
61
+ )
62
+
63
+ from pandas.core.generic import NDFrame
64
+
65
+
66
+ class Grouper:
67
+ """
68
+ A Grouper allows the user to specify a groupby instruction for an object.
69
+
70
+ This specification will select a column via the key parameter, or if the
71
+ level and/or axis parameters are given, a level of the index of the target
72
+ object.
73
+
74
+ If `axis` and/or `level` are passed as keywords to both `Grouper` and
75
+ `groupby`, the values passed to `Grouper` take precedence.
76
+
77
+ Parameters
78
+ ----------
79
+ key : str, defaults to None
80
+ Groupby key, which selects the grouping column of the target.
81
+ level : name/number, defaults to None
82
+ The level for the target index.
83
+ freq : str / frequency object, defaults to None
84
+ This will groupby the specified frequency if the target selection
85
+ (via key or level) is a datetime-like object. For full specification
86
+ of available frequencies, please see `here
87
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
88
+ axis : str, int, defaults to 0
89
+ Number/name of the axis.
90
+ sort : bool, defaults to False
91
+ Whether to sort the resulting labels.
92
+ closed : {'left' or 'right'}
93
+ Closed end of interval. Only when `freq` parameter is passed.
94
+ label : {'left' or 'right'}
95
+ Interval boundary to use for labeling.
96
+ Only when `freq` parameter is passed.
97
+ convention : {'start', 'end', 'e', 's'}
98
+ If grouper is PeriodIndex and `freq` parameter is passed.
99
+
100
+ origin : Timestamp or str, default 'start_day'
101
+ The timestamp on which to adjust the grouping. The timezone of origin must
102
+ match the timezone of the index.
103
+ If string, must be one of the following:
104
+
105
+ - 'epoch': `origin` is 1970-01-01
106
+ - 'start': `origin` is the first value of the timeseries
107
+ - 'start_day': `origin` is the first day at midnight of the timeseries
108
+
109
+ - 'end': `origin` is the last value of the timeseries
110
+ - 'end_day': `origin` is the ceiling midnight of the last day
111
+
112
+ .. versionadded:: 1.3.0
113
+
114
+ offset : Timedelta or str, default is None
115
+ An offset timedelta added to the origin.
116
+
117
+ dropna : bool, default True
118
+ If True, and if group keys contain NA values, NA values together with
119
+ row/column will be dropped. If False, NA values will also be treated as
120
+ the key in groups.
121
+
122
+ Returns
123
+ -------
124
+ Grouper or pandas.api.typing.TimeGrouper
125
+ A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper
126
+ is returned.
127
+
128
+ Examples
129
+ --------
130
+ ``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')``
131
+
132
+ >>> df = pd.DataFrame(
133
+ ... {
134
+ ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
135
+ ... "Speed": [100, 5, 200, 300, 15],
136
+ ... }
137
+ ... )
138
+ >>> df
139
+ Animal Speed
140
+ 0 Falcon 100
141
+ 1 Parrot 5
142
+ 2 Falcon 200
143
+ 3 Falcon 300
144
+ 4 Parrot 15
145
+ >>> df.groupby(pd.Grouper(key="Animal")).mean()
146
+ Speed
147
+ Animal
148
+ Falcon 200.0
149
+ Parrot 10.0
150
+
151
+ Specify a resample operation on the column 'Publish date'
152
+
153
+ >>> df = pd.DataFrame(
154
+ ... {
155
+ ... "Publish date": [
156
+ ... pd.Timestamp("2000-01-02"),
157
+ ... pd.Timestamp("2000-01-02"),
158
+ ... pd.Timestamp("2000-01-09"),
159
+ ... pd.Timestamp("2000-01-16")
160
+ ... ],
161
+ ... "ID": [0, 1, 2, 3],
162
+ ... "Price": [10, 20, 30, 40]
163
+ ... }
164
+ ... )
165
+ >>> df
166
+ Publish date ID Price
167
+ 0 2000-01-02 0 10
168
+ 1 2000-01-02 1 20
169
+ 2 2000-01-09 2 30
170
+ 3 2000-01-16 3 40
171
+ >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
172
+ ID Price
173
+ Publish date
174
+ 2000-01-02 0.5 15.0
175
+ 2000-01-09 2.0 30.0
176
+ 2000-01-16 3.0 40.0
177
+
178
+ If you want to adjust the start of the bins based on a fixed timestamp:
179
+
180
+ >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
181
+ >>> rng = pd.date_range(start, end, freq='7min')
182
+ >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
183
+ >>> ts
184
+ 2000-10-01 23:30:00 0
185
+ 2000-10-01 23:37:00 3
186
+ 2000-10-01 23:44:00 6
187
+ 2000-10-01 23:51:00 9
188
+ 2000-10-01 23:58:00 12
189
+ 2000-10-02 00:05:00 15
190
+ 2000-10-02 00:12:00 18
191
+ 2000-10-02 00:19:00 21
192
+ 2000-10-02 00:26:00 24
193
+ Freq: 7min, dtype: int64
194
+
195
+ >>> ts.groupby(pd.Grouper(freq='17min')).sum()
196
+ 2000-10-01 23:14:00 0
197
+ 2000-10-01 23:31:00 9
198
+ 2000-10-01 23:48:00 21
199
+ 2000-10-02 00:05:00 54
200
+ 2000-10-02 00:22:00 24
201
+ Freq: 17min, dtype: int64
202
+
203
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
204
+ 2000-10-01 23:18:00 0
205
+ 2000-10-01 23:35:00 18
206
+ 2000-10-01 23:52:00 27
207
+ 2000-10-02 00:09:00 39
208
+ 2000-10-02 00:26:00 24
209
+ Freq: 17min, dtype: int64
210
+
211
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
212
+ 2000-10-01 23:24:00 3
213
+ 2000-10-01 23:41:00 15
214
+ 2000-10-01 23:58:00 45
215
+ 2000-10-02 00:15:00 45
216
+ Freq: 17min, dtype: int64
217
+
218
+ If you want to adjust the start of the bins with an `offset` Timedelta, the two
219
+ following lines are equivalent:
220
+
221
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
222
+ 2000-10-01 23:30:00 9
223
+ 2000-10-01 23:47:00 21
224
+ 2000-10-02 00:04:00 54
225
+ 2000-10-02 00:21:00 24
226
+ Freq: 17min, dtype: int64
227
+
228
+ >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
229
+ 2000-10-01 23:30:00 9
230
+ 2000-10-01 23:47:00 21
231
+ 2000-10-02 00:04:00 54
232
+ 2000-10-02 00:21:00 24
233
+ Freq: 17min, dtype: int64
234
+
235
+ To replace the use of the deprecated `base` argument, you can now use `offset`,
236
+ in this example it is equivalent to have `base=2`:
237
+
238
+ >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
239
+ 2000-10-01 23:16:00 0
240
+ 2000-10-01 23:33:00 9
241
+ 2000-10-01 23:50:00 36
242
+ 2000-10-02 00:07:00 39
243
+ 2000-10-02 00:24:00 24
244
+ Freq: 17min, dtype: int64
245
+ """
246
+
247
+ sort: bool
248
+ dropna: bool
249
+ _gpr_index: Index | None
250
+ _grouper: Index | None
251
+
252
+ _attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort", "dropna")
253
+
254
+ def __new__(cls, *args, **kwargs):
255
+ if kwargs.get("freq") is not None:
256
+ from pandas.core.resample import TimeGrouper
257
+
258
+ cls = TimeGrouper
259
+ return super().__new__(cls)
260
+
261
+ def __init__(
262
+ self,
263
+ key=None,
264
+ level=None,
265
+ freq=None,
266
+ axis: Axis | lib.NoDefault = lib.no_default,
267
+ sort: bool = False,
268
+ dropna: bool = True,
269
+ ) -> None:
270
+ if type(self) is Grouper:
271
+ # i.e. not TimeGrouper
272
+ if axis is not lib.no_default:
273
+ warnings.warn(
274
+ "Grouper axis keyword is deprecated and will be removed in a "
275
+ "future version. To group on axis=1, use obj.T.groupby(...) "
276
+ "instead",
277
+ FutureWarning,
278
+ stacklevel=find_stack_level(),
279
+ )
280
+ else:
281
+ axis = 0
282
+ if axis is lib.no_default:
283
+ axis = 0
284
+
285
+ self.key = key
286
+ self.level = level
287
+ self.freq = freq
288
+ self.axis = axis
289
+ self.sort = sort
290
+ self.dropna = dropna
291
+
292
+ self._grouper_deprecated = None
293
+ self._indexer_deprecated: npt.NDArray[np.intp] | None = None
294
+ self._obj_deprecated = None
295
+ self._gpr_index = None
296
+ self.binner = None
297
+ self._grouper = None
298
+ self._indexer: npt.NDArray[np.intp] | None = None
299
+
300
+ def _get_grouper(
301
+ self, obj: NDFrameT, validate: bool = True
302
+ ) -> tuple[ops.BaseGrouper, NDFrameT]:
303
+ """
304
+ Parameters
305
+ ----------
306
+ obj : Series or DataFrame
307
+ validate : bool, default True
308
+ if True, validate the grouper
309
+
310
+ Returns
311
+ -------
312
+ a tuple of grouper, obj (possibly sorted)
313
+ """
314
+ obj, _, _ = self._set_grouper(obj)
315
+ grouper, _, obj = get_grouper(
316
+ obj,
317
+ [self.key],
318
+ axis=self.axis,
319
+ level=self.level,
320
+ sort=self.sort,
321
+ validate=validate,
322
+ dropna=self.dropna,
323
+ )
324
+ # Without setting this, subsequent lookups to .groups raise
325
+ # error: Incompatible types in assignment (expression has type "BaseGrouper",
326
+ # variable has type "None")
327
+ self._grouper_deprecated = grouper # type: ignore[assignment]
328
+
329
+ return grouper, obj
330
+
331
+ def _set_grouper(
332
+ self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None
333
+ ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:
334
+ """
335
+ given an object and the specifications, set up the internal grouper
336
+ for this particular specification
337
+
338
+ Parameters
339
+ ----------
340
+ obj : Series or DataFrame
341
+ sort : bool, default False
342
+ whether the resulting grouper should be sorted
343
+ gpr_index : Index or None, default None
344
+
345
+ Returns
346
+ -------
347
+ NDFrame
348
+ Index
349
+ np.ndarray[np.intp] | None
350
+ """
351
+ assert obj is not None
352
+
353
+ if self.key is not None and self.level is not None:
354
+ raise ValueError("The Grouper cannot specify both a key and a level!")
355
+
356
+ # Keep self._grouper value before overriding
357
+ if self._grouper is None:
358
+ # TODO: What are we assuming about subsequent calls?
359
+ self._grouper = gpr_index
360
+ self._indexer = self._indexer_deprecated
361
+
362
+ # the key must be a valid info item
363
+ if self.key is not None:
364
+ key = self.key
365
+ # The 'on' is already defined
366
+ if getattr(gpr_index, "name", None) == key and isinstance(obj, Series):
367
+ # Sometimes self._grouper will have been resorted while
368
+ # obj has not. In this case there is a mismatch when we
369
+ # call self._grouper.take(obj.index) so we need to undo the sorting
370
+ # before we call _grouper.take.
371
+ assert self._grouper is not None
372
+ if self._indexer is not None:
373
+ reverse_indexer = self._indexer.argsort()
374
+ unsorted_ax = self._grouper.take(reverse_indexer)
375
+ ax = unsorted_ax.take(obj.index)
376
+ else:
377
+ ax = self._grouper.take(obj.index)
378
+ else:
379
+ if key not in obj._info_axis:
380
+ raise KeyError(f"The grouper name {key} is not found")
381
+ ax = Index(obj[key], name=key)
382
+
383
+ else:
384
+ ax = obj._get_axis(self.axis)
385
+ if self.level is not None:
386
+ level = self.level
387
+
388
+ # if a level is given it must be a mi level or
389
+ # equivalent to the axis name
390
+ if isinstance(ax, MultiIndex):
391
+ level = ax._get_level_number(level)
392
+ ax = Index(ax._get_level_values(level), name=ax.names[level])
393
+
394
+ else:
395
+ if level not in (0, ax.name):
396
+ raise ValueError(f"The level {level} is not valid")
397
+
398
+ # possibly sort
399
+ indexer: npt.NDArray[np.intp] | None = None
400
+ if (self.sort or sort) and not ax.is_monotonic_increasing:
401
+ # use stable sort to support first, last, nth
402
+ # TODO: why does putting na_position="first" fix datetimelike cases?
403
+ indexer = self._indexer_deprecated = ax.array.argsort(
404
+ kind="mergesort", na_position="first"
405
+ )
406
+ ax = ax.take(indexer)
407
+ obj = obj.take(indexer, axis=self.axis)
408
+
409
+ # error: Incompatible types in assignment (expression has type
410
+ # "NDFrameT", variable has type "None")
411
+ self._obj_deprecated = obj # type: ignore[assignment]
412
+ self._gpr_index = ax
413
+ return obj, ax, indexer
414
+
415
+ @final
416
+ @property
417
+ def ax(self) -> Index:
418
+ warnings.warn(
419
+ f"{type(self).__name__}.ax is deprecated and will be removed in a "
420
+ "future version. Use Resampler.ax instead",
421
+ FutureWarning,
422
+ stacklevel=find_stack_level(),
423
+ )
424
+ index = self._gpr_index
425
+ if index is None:
426
+ raise ValueError("_set_grouper must be called before ax is accessed")
427
+ return index
428
+
429
+ @final
430
+ @property
431
+ def indexer(self):
432
+ warnings.warn(
433
+ f"{type(self).__name__}.indexer is deprecated and will be removed "
434
+ "in a future version. Use Resampler.indexer instead.",
435
+ FutureWarning,
436
+ stacklevel=find_stack_level(),
437
+ )
438
+ return self._indexer_deprecated
439
+
440
+ @final
441
+ @property
442
+ def obj(self):
443
+ # TODO(3.0): enforcing these deprecations on Grouper should close
444
+ # GH#25564, GH#41930
445
+ warnings.warn(
446
+ f"{type(self).__name__}.obj is deprecated and will be removed "
447
+ "in a future version. Use GroupBy.indexer instead.",
448
+ FutureWarning,
449
+ stacklevel=find_stack_level(),
450
+ )
451
+ return self._obj_deprecated
452
+
453
+ @final
454
+ @property
455
+ def grouper(self):
456
+ warnings.warn(
457
+ f"{type(self).__name__}.grouper is deprecated and will be removed "
458
+ "in a future version. Use GroupBy.grouper instead.",
459
+ FutureWarning,
460
+ stacklevel=find_stack_level(),
461
+ )
462
+ return self._grouper_deprecated
463
+
464
+ @final
465
+ @property
466
+ def groups(self):
467
+ warnings.warn(
468
+ f"{type(self).__name__}.groups is deprecated and will be removed "
469
+ "in a future version. Use GroupBy.groups instead.",
470
+ FutureWarning,
471
+ stacklevel=find_stack_level(),
472
+ )
473
+ # error: "None" has no attribute "groups"
474
+ return self._grouper_deprecated.groups # type: ignore[attr-defined]
475
+
476
+ @final
477
+ def __repr__(self) -> str:
478
+ attrs_list = (
479
+ f"{attr_name}={repr(getattr(self, attr_name))}"
480
+ for attr_name in self._attributes
481
+ if getattr(self, attr_name) is not None
482
+ )
483
+ attrs = ", ".join(attrs_list)
484
+ cls_name = type(self).__name__
485
+ return f"{cls_name}({attrs})"
486
+
487
+
488
+ @final
489
+ class Grouping:
490
+ """
491
+ Holds the grouping information for a single key
492
+
493
+ Parameters
494
+ ----------
495
+ index : Index
496
+ grouper :
497
+ obj : DataFrame or Series
498
+ name : Label
499
+ level :
500
+ observed : bool, default False
501
+ If we are a Categorical, use the observed values
502
+ in_axis : if the Grouping is a column in self.obj and hence among
503
+ Groupby.exclusions list
504
+ dropna : bool, default True
505
+ Whether to drop NA groups.
506
+ uniques : Array-like, optional
507
+ When specified, will be used for unique values. Enables including empty groups
508
+ in the result for a BinGrouper. Must not contain duplicates.
509
+
510
+ Attributes
511
+ ----------
512
+ indices : dict
513
+ Mapping of {group -> index_list}
514
+ codes : ndarray
515
+ Group codes
516
+ group_index : Index or None
517
+ unique groups
518
+ groups : dict
519
+ Mapping of {group -> label_list}
520
+ """
521
+
522
+ _codes: npt.NDArray[np.signedinteger] | None = None
523
+ _all_grouper: Categorical | None
524
+ _orig_cats: Index | None
525
+ _index: Index
526
+
527
+ def __init__(
528
+ self,
529
+ index: Index,
530
+ grouper=None,
531
+ obj: NDFrame | None = None,
532
+ level=None,
533
+ sort: bool = True,
534
+ observed: bool = False,
535
+ in_axis: bool = False,
536
+ dropna: bool = True,
537
+ uniques: ArrayLike | None = None,
538
+ ) -> None:
539
+ self.level = level
540
+ self._orig_grouper = grouper
541
+ grouping_vector = _convert_grouper(index, grouper)
542
+ self._all_grouper = None
543
+ self._orig_cats = None
544
+ self._index = index
545
+ self._sort = sort
546
+ self.obj = obj
547
+ self._observed = observed
548
+ self.in_axis = in_axis
549
+ self._dropna = dropna
550
+ self._uniques = uniques
551
+
552
+ # we have a single grouper which may be a myriad of things,
553
+ # some of which are dependent on the passing in level
554
+
555
+ ilevel = self._ilevel
556
+ if ilevel is not None:
557
+ # In extant tests, the new self.grouping_vector matches
558
+ # `index.get_level_values(ilevel)` whenever
559
+ # mapper is None and isinstance(index, MultiIndex)
560
+ if isinstance(index, MultiIndex):
561
+ index_level = index.get_level_values(ilevel)
562
+ else:
563
+ index_level = index
564
+
565
+ if grouping_vector is None:
566
+ grouping_vector = index_level
567
+ else:
568
+ mapper = grouping_vector
569
+ grouping_vector = index_level.map(mapper)
570
+
571
+ # a passed Grouper like, directly get the grouper in the same way
572
+ # as single grouper groupby, use the group_info to get codes
573
+ elif isinstance(grouping_vector, Grouper):
574
+ # get the new grouper; we already have disambiguated
575
+ # what key/level refer to exactly, don't need to
576
+ # check again as we have by this point converted these
577
+ # to an actual value (rather than a pd.Grouper)
578
+ assert self.obj is not None # for mypy
579
+ newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False)
580
+ self.obj = newobj
581
+
582
+ if isinstance(newgrouper, ops.BinGrouper):
583
+ # TODO: can we unwrap this and get a tighter typing
584
+ # for self.grouping_vector?
585
+ grouping_vector = newgrouper
586
+ else:
587
+ # ops.BaseGrouper
588
+ # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1.
589
+ # If that were to occur, would we be throwing out information?
590
+ # error: Cannot determine type of "grouping_vector" [has-type]
591
+ ng = newgrouper.groupings[0].grouping_vector # type: ignore[has-type]
592
+ # use Index instead of ndarray so we can recover the name
593
+ grouping_vector = Index(ng, name=newgrouper.result_index.name)
594
+
595
+ elif not isinstance(
596
+ grouping_vector, (Series, Index, ExtensionArray, np.ndarray)
597
+ ):
598
+ # no level passed
599
+ if getattr(grouping_vector, "ndim", 1) != 1:
600
+ t = str(type(grouping_vector))
601
+ raise ValueError(f"Grouper for '{t}' not 1-dimensional")
602
+
603
+ grouping_vector = index.map(grouping_vector)
604
+
605
+ if not (
606
+ hasattr(grouping_vector, "__len__")
607
+ and len(grouping_vector) == len(index)
608
+ ):
609
+ grper = pprint_thing(grouping_vector)
610
+ errmsg = (
611
+ "Grouper result violates len(labels) == "
612
+ f"len(data)\nresult: {grper}"
613
+ )
614
+ raise AssertionError(errmsg)
615
+
616
+ if isinstance(grouping_vector, np.ndarray):
617
+ if grouping_vector.dtype.kind in "mM":
618
+ # if we have a date/time-like grouper, make sure that we have
619
+ # Timestamps like
620
+ # TODO 2022-10-08 we only have one test that gets here and
621
+ # values are already in nanoseconds in that case.
622
+ grouping_vector = Series(grouping_vector).to_numpy()
623
+ elif isinstance(getattr(grouping_vector, "dtype", None), CategoricalDtype):
624
+ # a passed Categorical
625
+ self._orig_cats = grouping_vector.categories
626
+ grouping_vector, self._all_grouper = recode_for_groupby(
627
+ grouping_vector, sort, observed
628
+ )
629
+
630
+ self.grouping_vector = grouping_vector
631
+
632
+ def __repr__(self) -> str:
633
+ return f"Grouping({self.name})"
634
+
635
+ def __iter__(self) -> Iterator:
636
+ return iter(self.indices)
637
+
638
+ @cache_readonly
639
+ def _passed_categorical(self) -> bool:
640
+ dtype = getattr(self.grouping_vector, "dtype", None)
641
+ return isinstance(dtype, CategoricalDtype)
642
+
643
+ @cache_readonly
644
+ def name(self) -> Hashable:
645
+ ilevel = self._ilevel
646
+ if ilevel is not None:
647
+ return self._index.names[ilevel]
648
+
649
+ if isinstance(self._orig_grouper, (Index, Series)):
650
+ return self._orig_grouper.name
651
+
652
+ elif isinstance(self.grouping_vector, ops.BaseGrouper):
653
+ return self.grouping_vector.result_index.name
654
+
655
+ elif isinstance(self.grouping_vector, Index):
656
+ return self.grouping_vector.name
657
+
658
+ # otherwise we have ndarray or ExtensionArray -> no name
659
+ return None
660
+
661
+ @cache_readonly
662
+ def _ilevel(self) -> int | None:
663
+ """
664
+ If necessary, convert the index level name to an index level position.
665
+ """
666
+ level = self.level
667
+ if level is None:
668
+ return None
669
+ if not isinstance(level, int):
670
+ index = self._index
671
+ if level not in index.names:
672
+ raise AssertionError(f"Level {level} not in index")
673
+ return index.names.index(level)
674
+ return level
675
+
676
+ @property
677
+ def ngroups(self) -> int:
678
+ return len(self._group_index)
679
+
680
+ @cache_readonly
681
+ def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
682
+ # we have a list of groupers
683
+ if isinstance(self.grouping_vector, ops.BaseGrouper):
684
+ return self.grouping_vector.indices
685
+
686
+ values = Categorical(self.grouping_vector)
687
+ return values._reverse_indexer()
688
+
689
+ @property
690
+ def codes(self) -> npt.NDArray[np.signedinteger]:
691
+ return self._codes_and_uniques[0]
692
+
693
+ @cache_readonly
694
+ def _group_arraylike(self) -> ArrayLike:
695
+ """
696
+ Analogous to result_index, but holding an ArrayLike to ensure
697
+ we can retain ExtensionDtypes.
698
+ """
699
+ if self._all_grouper is not None:
700
+ # retain dtype for categories, including unobserved ones
701
+ return self._result_index._values
702
+
703
+ elif self._passed_categorical:
704
+ return self._group_index._values
705
+
706
+ return self._codes_and_uniques[1]
707
+
708
+ @property
709
+ def group_arraylike(self) -> ArrayLike:
710
+ """
711
+ Analogous to result_index, but holding an ArrayLike to ensure
712
+ we can retain ExtensionDtypes.
713
+ """
714
+ warnings.warn(
715
+ "group_arraylike is deprecated and will be removed in a future "
716
+ "version of pandas",
717
+ category=FutureWarning,
718
+ stacklevel=find_stack_level(),
719
+ )
720
+ return self._group_arraylike
721
+
722
+ @cache_readonly
723
+ def _result_index(self) -> Index:
724
+ # result_index retains dtype for categories, including unobserved ones,
725
+ # which group_index does not
726
+ if self._all_grouper is not None:
727
+ group_idx = self._group_index
728
+ assert isinstance(group_idx, CategoricalIndex)
729
+ cats = self._orig_cats
730
+ # set_categories is dynamically added
731
+ return group_idx.set_categories(cats) # type: ignore[attr-defined]
732
+ return self._group_index
733
+
734
+ @property
735
+ def result_index(self) -> Index:
736
+ warnings.warn(
737
+ "result_index is deprecated and will be removed in a future "
738
+ "version of pandas",
739
+ category=FutureWarning,
740
+ stacklevel=find_stack_level(),
741
+ )
742
+ return self._result_index
743
+
744
+ @cache_readonly
745
+ def _group_index(self) -> Index:
746
+ codes, uniques = self._codes_and_uniques
747
+ if not self._dropna and self._passed_categorical:
748
+ assert isinstance(uniques, Categorical)
749
+ if self._sort and (codes == len(uniques)).any():
750
+ # Add NA value on the end when sorting
751
+ uniques = Categorical.from_codes(
752
+ np.append(uniques.codes, [-1]), uniques.categories, validate=False
753
+ )
754
+ elif len(codes) > 0:
755
+ # Need to determine proper placement of NA value when not sorting
756
+ cat = self.grouping_vector
757
+ na_idx = (cat.codes < 0).argmax()
758
+ if cat.codes[na_idx] < 0:
759
+ # count number of unique codes that comes before the nan value
760
+ na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx])
761
+ new_codes = np.insert(uniques.codes, na_unique_idx, -1)
762
+ uniques = Categorical.from_codes(
763
+ new_codes, uniques.categories, validate=False
764
+ )
765
+ return Index._with_infer(uniques, name=self.name)
766
+
767
+ @property
768
+ def group_index(self) -> Index:
769
+ warnings.warn(
770
+ "group_index is deprecated and will be removed in a future "
771
+ "version of pandas",
772
+ category=FutureWarning,
773
+ stacklevel=find_stack_level(),
774
+ )
775
+ return self._group_index
776
+
777
+ @cache_readonly
778
+ def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]:
779
+ uniques: ArrayLike
780
+ if self._passed_categorical:
781
+ # we make a CategoricalIndex out of the cat grouper
782
+ # preserving the categories / ordered attributes;
783
+ # doesn't (yet - GH#46909) handle dropna=False
784
+ cat = self.grouping_vector
785
+ categories = cat.categories
786
+
787
+ if self._observed:
788
+ ucodes = algorithms.unique1d(cat.codes)
789
+ ucodes = ucodes[ucodes != -1]
790
+ if self._sort:
791
+ ucodes = np.sort(ucodes)
792
+ else:
793
+ ucodes = np.arange(len(categories))
794
+
795
+ uniques = Categorical.from_codes(
796
+ codes=ucodes, categories=categories, ordered=cat.ordered, validate=False
797
+ )
798
+
799
+ codes = cat.codes
800
+ if not self._dropna:
801
+ na_mask = codes < 0
802
+ if np.any(na_mask):
803
+ if self._sort:
804
+ # Replace NA codes with `largest code + 1`
805
+ na_code = len(categories)
806
+ codes = np.where(na_mask, na_code, codes)
807
+ else:
808
+ # Insert NA code into the codes based on first appearance
809
+ # A negative code must exist, no need to check codes[na_idx] < 0
810
+ na_idx = na_mask.argmax()
811
+ # count number of unique codes that comes before the nan value
812
+ na_code = algorithms.nunique_ints(codes[:na_idx])
813
+ codes = np.where(codes >= na_code, codes + 1, codes)
814
+ codes = np.where(na_mask, na_code, codes)
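# Editor's note (illustrative, not in the original diff): with codes
# [0, -1, 1, 0], the first NA sits at position 1, so na_code =
# nunique_ints([0]) = 1; codes >= 1 shift up by one and the NA slots
# become 1, giving [0, 1, 2, 0] -- the NA group keeps the position of
# its first appearance among the uniques.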
815
+
816
+ if not self._observed:
817
+ uniques = uniques.reorder_categories(self._orig_cats)
818
+
819
+ return codes, uniques
820
+
821
+ elif isinstance(self.grouping_vector, ops.BaseGrouper):
822
+ # we have a list of groupers
823
+ codes = self.grouping_vector.codes_info
824
+ uniques = self.grouping_vector.result_index._values
825
+ elif self._uniques is not None:
826
+ # GH#50486 Code grouping_vector using _uniques; allows
827
+ # including uniques that are not present in grouping_vector.
828
+ cat = Categorical(self.grouping_vector, categories=self._uniques)
829
+ codes = cat.codes
830
+ uniques = self._uniques
831
+ else:
832
+ # GH35667, replace dropna=False with use_na_sentinel=False
833
+ # error: Incompatible types in assignment (expression has type "Union[
834
+ # ndarray[Any, Any], Index]", variable has type "Categorical")
835
+ codes, uniques = algorithms.factorize( # type: ignore[assignment]
836
+ self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna
837
+ )
838
+ return codes, uniques
839
+
840
+ @cache_readonly
841
+ def groups(self) -> dict[Hashable, np.ndarray]:
842
+ cats = Categorical.from_codes(self.codes, self._group_index, validate=False)
843
+ return self._index.groupby(cats)
844
+
845
+
846
+ def get_grouper(
847
+ obj: NDFrameT,
848
+ key=None,
849
+ axis: Axis = 0,
850
+ level=None,
851
+ sort: bool = True,
852
+ observed: bool = False,
853
+ validate: bool = True,
854
+ dropna: bool = True,
855
+ ) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]:
856
+ """
857
+ Create and return a BaseGrouper, which is an internal
858
+ mapping of how to create the grouper indexers.
859
+ This may be composed of multiple Grouping objects, indicating
860
+ multiple groupers
861
+
862
+ Groupers are ultimately index mappings. They can originate as:
863
+ index mappings, keys to columns, functions, or Groupers
864
+
865
+ Groupers enable local references to axis, level, and sort, while
866
+ the passed in axis, level, and sort are 'global'.
867
+
868
+ This routine tries to figure out what the passed-in references
869
+ are and then creates a Grouping for each one, combined into
870
+ a BaseGrouper.
871
+
872
+ If observed & we have a categorical grouper, only show the observed
873
+ values.
874
+
875
+ If validate, then check for key/level overlaps.
876
+
877
+ """
878
+ group_axis = obj._get_axis(axis)
879
+
880
+ # validate that the passed single level is compatible with the passed
881
+ # axis of the object
882
+ if level is not None:
883
+ # TODO: These if-block and else-block are almost same.
884
+ # MultiIndex instance check is removable, but it seems that there are
885
+ # some processes only for non-MultiIndex in else-block,
886
+ # eg. `obj.index.name != level`. We have to consider carefully whether
887
+ # these are applicable for MultiIndex. Even if these are applicable,
888
+ # we need to check if it makes no side effect to subsequent processes
889
+ # on the outside of this condition.
890
+ # (GH 17621)
891
+ if isinstance(group_axis, MultiIndex):
892
+ if is_list_like(level) and len(level) == 1:
893
+ level = level[0]
894
+
895
+ if key is None and is_scalar(level):
896
+ # Get the level values from group_axis
897
+ key = group_axis.get_level_values(level)
898
+ level = None
899
+
900
+ else:
901
+ # allow level to be a length-one list-like object
902
+ # (e.g., level=[0])
903
+ # GH 13901
904
+ if is_list_like(level):
905
+ nlevels = len(level)
906
+ if nlevels == 1:
907
+ level = level[0]
908
+ elif nlevels == 0:
909
+ raise ValueError("No group keys passed!")
910
+ else:
911
+ raise ValueError("multiple levels only valid with MultiIndex")
912
+
913
+ if isinstance(level, str):
914
+ if obj._get_axis(axis).name != level:
915
+ raise ValueError(
916
+ f"level name {level} is not the name "
917
+ f"of the {obj._get_axis_name(axis)}"
918
+ )
919
+ elif level > 0 or level < -1:
920
+ raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
921
+
922
+ # NOTE: `group_axis` and `group_axis.get_level_values(level)`
923
+ # are same in this section.
924
+ level = None
925
+ key = group_axis
926
+
927
+ # a passed-in Grouper, directly convert
928
+ if isinstance(key, Grouper):
929
+ grouper, obj = key._get_grouper(obj, validate=False)
930
+ if key.key is None:
931
+ return grouper, frozenset(), obj
932
+ else:
933
+ return grouper, frozenset({key.key}), obj
934
+
935
+ # already have a BaseGrouper, just return it
936
+ elif isinstance(key, ops.BaseGrouper):
937
+ return key, frozenset(), obj
938
+
939
+ if not isinstance(key, list):
940
+ keys = [key]
941
+ match_axis_length = False
942
+ else:
943
+ keys = key
944
+ match_axis_length = len(keys) == len(group_axis)
945
+
946
+ # what are we after, exactly?
947
+ any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
948
+ any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys)
949
+ any_arraylike = any(
950
+ isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
951
+ )
952
+
953
+ # is this an index replacement?
954
+ if (
955
+ not any_callable
956
+ and not any_arraylike
957
+ and not any_groupers
958
+ and match_axis_length
959
+ and level is None
960
+ ):
961
+ if isinstance(obj, DataFrame):
962
+ all_in_columns_index = all(
963
+ g in obj.columns or g in obj.index.names for g in keys
964
+ )
965
+ else:
966
+ assert isinstance(obj, Series)
967
+ all_in_columns_index = all(g in obj.index.names for g in keys)
968
+
969
+ if not all_in_columns_index:
970
+ keys = [com.asarray_tuplesafe(keys)]
971
+
972
+ if isinstance(level, (tuple, list)):
973
+ if key is None:
974
+ keys = [None] * len(level)
975
+ levels = level
976
+ else:
977
+ levels = [level] * len(keys)
978
+
979
+ groupings: list[Grouping] = []
980
+ exclusions: set[Hashable] = set()
981
+
982
+ # if the actual grouper should be obj[key]
983
+ def is_in_axis(key) -> bool:
984
+ if not _is_label_like(key):
985
+ if obj.ndim == 1:
986
+ return False
987
+
988
+ # items -> .columns for DataFrame, .index for Series
989
+ items = obj.axes[-1]
990
+ try:
991
+ items.get_loc(key)
992
+ except (KeyError, TypeError, InvalidIndexError):
993
+ # TypeError shows up here if we pass e.g. an Index
994
+ return False
995
+
996
+ return True
997
+
998
+ # if the grouper is obj[name]
999
+ def is_in_obj(gpr) -> bool:
1000
+ if not hasattr(gpr, "name"):
1001
+ return False
1002
+ if using_copy_on_write() or warn_copy_on_write():
1003
+ # For the CoW case, we check the references to determine if the
1004
+ # series is part of the object
1005
+ try:
1006
+ obj_gpr_column = obj[gpr.name]
1007
+ except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):
1008
+ return False
1009
+ if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series):
1010
+ return gpr._mgr.references_same_values( # type: ignore[union-attr]
1011
+ obj_gpr_column._mgr, 0 # type: ignore[arg-type]
1012
+ )
1013
+ return False
1014
+ try:
1015
+ return gpr is obj[gpr.name]
1016
+ except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime):
1017
+ # IndexError reached in e.g. test_skip_group_keys when we pass
1018
+ # lambda here
1019
+ # InvalidIndexError raised on key-types inappropriate for index,
1020
+ # e.g. DatetimeIndex.get_loc(tuple())
1021
+ # OutOfBoundsDatetime raised when obj is a Series with DatetimeIndex
1022
+ # and gpr.name is month str
1023
+ return False
1024
+
1025
+ for gpr, level in zip(keys, levels):
1026
+ if is_in_obj(gpr): # df.groupby(df['name'])
1027
+ in_axis = True
1028
+ exclusions.add(gpr.name)
1029
+
1030
+ elif is_in_axis(gpr): # df.groupby('name')
1031
+ if obj.ndim != 1 and gpr in obj:
1032
+ if validate:
1033
+ obj._check_label_or_level_ambiguity(gpr, axis=axis)
1034
+ in_axis, name, gpr = True, gpr, obj[gpr]
1035
+ if gpr.ndim != 1:
1036
+ # non-unique columns; raise here to get the name in the
1037
+ # exception message
1038
+ raise ValueError(f"Grouper for '{name}' not 1-dimensional")
1039
+ exclusions.add(name)
1040
+ elif obj._is_level_reference(gpr, axis=axis):
1041
+ in_axis, level, gpr = False, gpr, None
1042
+ else:
1043
+ raise KeyError(gpr)
1044
+ elif isinstance(gpr, Grouper) and gpr.key is not None:
1045
+ # Add key to exclusions
1046
+ exclusions.add(gpr.key)
1047
+ in_axis = True
1048
+ else:
1049
+ in_axis = False
1050
+
1051
+ # create the Grouping
1052
+ # allow us to passing the actual Grouping as the gpr
1053
+ ping = (
1054
+ Grouping(
1055
+ group_axis,
1056
+ gpr,
1057
+ obj=obj,
1058
+ level=level,
1059
+ sort=sort,
1060
+ observed=observed,
1061
+ in_axis=in_axis,
1062
+ dropna=dropna,
1063
+ )
1064
+ if not isinstance(gpr, Grouping)
1065
+ else gpr
1066
+ )
1067
+
1068
+ groupings.append(ping)
1069
+
1070
+ if len(groupings) == 0 and len(obj):
1071
+ raise ValueError("No group keys passed!")
1072
+ if len(groupings) == 0:
1073
+ groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))
1074
+
1075
+ # create the internals grouper
1076
+ grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna)
1077
+ return grouper, frozenset(exclusions), obj
1078
+
1079
+
1080
+ def _is_label_like(val) -> bool:
1081
+ return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))
1082
+
1083
+
1084
+ def _convert_grouper(axis: Index, grouper):
1085
+ if isinstance(grouper, dict):
1086
+ return grouper.get
1087
+ elif isinstance(grouper, Series):
1088
+ if grouper.index.equals(axis):
1089
+ return grouper._values
1090
+ else:
1091
+ return grouper.reindex(axis)._values
1092
+ elif isinstance(grouper, MultiIndex):
1093
+ return grouper._values
1094
+ elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)):
1095
+ if len(grouper) != len(axis):
1096
+ raise ValueError("Grouper and axis must be same length")
1097
+
1098
+ if isinstance(grouper, (list, tuple)):
1099
+ grouper = com.asarray_tuplesafe(grouper)
1100
+ return grouper
1101
+ else:
1102
+ return grouper
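(Editor's note, not part of the diff: a hedged sketch of how ``_convert_grouper`` above normalizes the different grouper flavors. It imports a private helper from this very module, so treat it as illustration only.)

    import pandas as pd
    from pandas.core.groupby.grouper import _convert_grouper

    axis = pd.Index(["a", "b", "c"])

    # A dict grouper becomes its .get method, i.e. a mapping function.
    mapper = _convert_grouper(axis, {"a": 1, "b": 2, "c": 1})
    assert mapper("a") == 1

    # A Series aligned with the axis contributes its underlying values ...
    aligned = _convert_grouper(axis, pd.Series([1, 2, 1], index=axis))

    # ... while a misaligned Series is reindexed to the axis first.
    shuffled = _convert_grouper(axis, pd.Series([2, 1, 1], index=["b", "a", "c"]))

    # List-likes of matching length are passed through as an array.
    arr = _convert_grouper(axis, ["x", "y", "x"])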
venv/lib/python3.10/site-packages/pandas/core/groupby/indexing.py ADDED
@@ -0,0 +1,304 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Iterable
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Literal,
7
+ cast,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas.util._decorators import (
13
+ cache_readonly,
14
+ doc,
15
+ )
16
+
17
+ from pandas.core.dtypes.common import (
18
+ is_integer,
19
+ is_list_like,
20
+ )
21
+
22
+ if TYPE_CHECKING:
23
+ from pandas._typing import PositionalIndexer
24
+
25
+ from pandas import (
26
+ DataFrame,
27
+ Series,
28
+ )
29
+ from pandas.core.groupby import groupby
30
+
31
+
32
+ class GroupByIndexingMixin:
33
+ """
34
+ Mixin for adding ._positional_selector to GroupBy.
35
+ """
36
+
37
+ @cache_readonly
38
+ def _positional_selector(self) -> GroupByPositionalSelector:
39
+ """
40
+ Return positional selection for each group.
41
+
42
+ ``groupby._positional_selector[i:j]`` is similar to
43
+ ``groupby.apply(lambda x: x.iloc[i:j])``
44
+ but much faster and preserves the original index and order.
45
+
46
+ ``_positional_selector[]`` is compatible with and extends :meth:`~GroupBy.head`
47
+ and :meth:`~GroupBy.tail`. For example:
48
+
49
+ - ``head(5)``
50
+ - ``_positional_selector[5:-5]``
51
+ - ``tail(5)``
52
+
53
+ together return all the rows.
54
+
55
+ Allowed inputs for the index are:
56
+
57
+ - An integer valued iterable, e.g. ``range(2, 4)``.
58
+ - A comma separated list of integers and slices, e.g. ``5``, ``2, 4``, ``2:4``.
59
+
60
+ The output format is the same as :meth:`~GroupBy.head` and
61
+ :meth:`~GroupBy.tail`, namely
62
+ a subset of the ``DataFrame`` or ``Series`` with the index and order preserved.
63
+
64
+ Returns
65
+ -------
66
+ Series
67
+ The filtered subset of the original Series.
68
+ DataFrame
69
+ The filtered subset of the original DataFrame.
70
+
71
+ See Also
72
+ --------
73
+ DataFrame.iloc : Purely integer-location based indexing for selection by
74
+ position.
75
+ GroupBy.head : Return first n rows of each group.
76
+ GroupBy.tail : Return last n rows of each group.
77
+ GroupBy.nth : Take the nth row from each group if n is an int, or a
78
+ subset of rows, if n is a list of ints.
79
+
80
+ Notes
81
+ -----
82
+ - The slice step cannot be negative.
83
+ - If the index specification results in overlaps, the item is not duplicated.
84
+ - If the index specification changes the order of items, then
85
+ they are returned in their original order.
86
+ By contrast, ``DataFrame.iloc`` can change the row order.
87
+ - ``groupby()`` parameters such as as_index and dropna are ignored.
88
+
89
+ The differences between ``_positional_selector[]`` and :meth:`~GroupBy.nth`
90
+ with ``as_index=False`` are:
91
+
92
+ - Input to ``_positional_selector`` can include
93
+ one or more slices whereas ``nth``
94
+ just handles an integer or a list of integers.
95
+ - ``_positional_selector`` can accept a slice relative to the
96
+ last row of each group.
97
+ - ``_positional_selector`` does not have an equivalent to the
98
+ ``nth()`` ``dropna`` parameter.
99
+
100
+ Examples
101
+ --------
102
+ >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]],
103
+ ... columns=["A", "B"])
104
+ >>> df.groupby("A")._positional_selector[1:2]
105
+ A B
106
+ 1 a 2
107
+ 4 b 5
108
+
109
+ >>> df.groupby("A")._positional_selector[1, -1]
110
+ A B
111
+ 1 a 2
112
+ 2 a 3
113
+ 4 b 5
114
+ """
115
+ if TYPE_CHECKING:
116
+ # pylint: disable-next=used-before-assignment
117
+ groupby_self = cast(groupby.GroupBy, self)
118
+ else:
119
+ groupby_self = self
120
+
121
+ return GroupByPositionalSelector(groupby_self)
122
+
123
+ def _make_mask_from_positional_indexer(
124
+ self,
125
+ arg: PositionalIndexer | tuple,
126
+ ) -> np.ndarray:
127
+ if is_list_like(arg):
128
+ if all(is_integer(i) for i in cast(Iterable, arg)):
129
+ mask = self._make_mask_from_list(cast(Iterable[int], arg))
130
+ else:
131
+ mask = self._make_mask_from_tuple(cast(tuple, arg))
132
+
133
+ elif isinstance(arg, slice):
134
+ mask = self._make_mask_from_slice(arg)
135
+ elif is_integer(arg):
136
+ mask = self._make_mask_from_int(cast(int, arg))
137
+ else:
138
+ raise TypeError(
139
+ f"Invalid index {type(arg)}. "
140
+ "Must be integer, list-like, slice or a tuple of "
141
+ "integers and slices"
142
+ )
143
+
144
+ if isinstance(mask, bool):
145
+ if mask:
146
+ mask = self._ascending_count >= 0
147
+ else:
148
+ mask = self._ascending_count < 0
149
+
150
+ return cast(np.ndarray, mask)
151
+
152
+ def _make_mask_from_int(self, arg: int) -> np.ndarray:
153
+ if arg >= 0:
154
+ return self._ascending_count == arg
155
+ else:
156
+ return self._descending_count == (-arg - 1)
157
+
158
+ def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray:
159
+ positive = [arg for arg in args if arg >= 0]
160
+ negative = [-arg - 1 for arg in args if arg < 0]
161
+
162
+ mask: bool | np.ndarray = False
163
+
164
+ if positive:
165
+ mask |= np.isin(self._ascending_count, positive)
166
+
167
+ if negative:
168
+ mask |= np.isin(self._descending_count, negative)
169
+
170
+ return mask
171
+
172
+ def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray:
173
+ mask: bool | np.ndarray = False
174
+
175
+ for arg in args:
176
+ if is_integer(arg):
177
+ mask |= self._make_mask_from_int(cast(int, arg))
178
+ elif isinstance(arg, slice):
179
+ mask |= self._make_mask_from_slice(arg)
180
+ else:
181
+ raise ValueError(
182
+ f"Invalid argument {type(arg)}. Should be int or slice."
183
+ )
184
+
185
+ return mask
186
+
187
+ def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray:
188
+ start = arg.start
189
+ stop = arg.stop
190
+ step = arg.step
191
+
192
+ if step is not None and step < 0:
193
+ raise ValueError(f"Invalid step {step}. Must be non-negative")
194
+
195
+ mask: bool | np.ndarray = True
196
+
197
+ if step is None:
198
+ step = 1
199
+
200
+ if start is None:
201
+ if step > 1:
202
+ mask &= self._ascending_count % step == 0
203
+
204
+ elif start >= 0:
205
+ mask &= self._ascending_count >= start
206
+
207
+ if step > 1:
208
+ mask &= (self._ascending_count - start) % step == 0
209
+
210
+ else:
211
+ mask &= self._descending_count < -start
212
+
213
+ offset_array = self._descending_count + start + 1
214
+ limit_array = (
215
+ self._ascending_count + self._descending_count + (start + 1)
216
+ ) < 0
217
+ offset_array = np.where(limit_array, self._ascending_count, offset_array)
218
+
219
+ mask &= offset_array % step == 0
220
+
221
+ if stop is not None:
222
+ if stop >= 0:
223
+ mask &= self._ascending_count < stop
224
+ else:
225
+ mask &= self._descending_count >= -stop
226
+
227
+ return mask
228
+
229
+ @cache_readonly
230
+ def _ascending_count(self) -> np.ndarray:
231
+ if TYPE_CHECKING:
232
+ groupby_self = cast(groupby.GroupBy, self)
233
+ else:
234
+ groupby_self = self
235
+
236
+ return groupby_self._cumcount_array()
237
+
238
+ @cache_readonly
239
+ def _descending_count(self) -> np.ndarray:
240
+ if TYPE_CHECKING:
241
+ groupby_self = cast(groupby.GroupBy, self)
242
+ else:
243
+ groupby_self = self
244
+
245
+ return groupby_self._cumcount_array(ascending=False)
246
+
247
+
248
+ @doc(GroupByIndexingMixin._positional_selector)
249
+ class GroupByPositionalSelector:
250
+ def __init__(self, groupby_object: groupby.GroupBy) -> None:
251
+ self.groupby_object = groupby_object
252
+
253
+ def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:
254
+ """
255
+ Select by positional index per group.
256
+
257
+ Implements GroupBy._positional_selector
258
+
259
+ Parameters
260
+ ----------
261
+ arg : PositionalIndexer | tuple
262
+ Allowed values are:
263
+ - int
264
+ - int valued iterable such as list or range
265
+ - slice with step either None or positive
266
+ - tuple of integers and slices
267
+
268
+ Returns
269
+ -------
270
+ Series
271
+ The filtered subset of the original groupby Series.
272
+ DataFrame
273
+ The filtered subset of the original groupby DataFrame.
274
+
275
+ See Also
276
+ --------
277
+ DataFrame.iloc : Integer-location based indexing for selection by position.
278
+ GroupBy.head : Return first n rows of each group.
279
+ GroupBy.tail : Return last n rows of each group.
280
+ GroupBy._positional_selector : Return positional selection for each group.
281
+ GroupBy.nth : Take the nth row from each group if n is an int, or a
282
+ subset of rows, if n is a list of ints.
283
+ """
284
+ mask = self.groupby_object._make_mask_from_positional_indexer(arg)
285
+ return self.groupby_object._mask_selected_obj(mask)
286
+
287
+
288
+ class GroupByNthSelector:
289
+ """
290
+ Dynamically substituted for GroupBy.nth to enable both call and index
291
+ """
292
+
293
+ def __init__(self, groupby_object: groupby.GroupBy) -> None:
294
+ self.groupby_object = groupby_object
295
+
296
+ def __call__(
297
+ self,
298
+ n: PositionalIndexer | tuple,
299
+ dropna: Literal["any", "all", None] = None,
300
+ ) -> DataFrame | Series:
301
+ return self.groupby_object._nth(n, dropna)
302
+
303
+ def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series:
304
+ return self.groupby_object._nth(n)
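The selector classes above are easiest to see from the public API. Below is a minimal, hedged sketch (the frame and column names simply mirror the docstring example; ``_positional_selector`` is a private attribute, so only ``GroupBy.nth`` should be relied on outside pandas internals) of how ``GroupByNthSelector`` lets ``nth`` be both called and indexed, and how the positional selector mixes integers and slices per group:

```python
import pandas as pd

# Illustrative frame, same shape as the docstring example above.
df = pd.DataFrame(
    [["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]],
    columns=["A", "B"],
)
gb = df.groupby("A")

# GroupByNthSelector makes nth usable both as a call and as an indexer.
print(gb.nth(1))      # second row of each group
print(gb.nth[1:2])    # the same selection expressed as a slice

# The private positional selector accepts a mix of ints and slices,
# evaluated per group, with rows returned in their original order.
print(df.groupby("A")._positional_selector[1, -1])
```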
venv/lib/python3.10/site-packages/pandas/core/groupby/numba_.py ADDED
@@ -0,0 +1,181 @@
1
+ """Common utilities for Numba operations with groupby ops"""
2
+ from __future__ import annotations
3
+
4
+ import functools
5
+ import inspect
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Any,
9
+ Callable,
10
+ )
11
+
12
+ import numpy as np
13
+
14
+ from pandas.compat._optional import import_optional_dependency
15
+
16
+ from pandas.core.util.numba_ import (
17
+ NumbaUtilError,
18
+ jit_user_function,
19
+ )
20
+
21
+ if TYPE_CHECKING:
22
+ from pandas._typing import Scalar
23
+
24
+
25
+ def validate_udf(func: Callable) -> None:
26
+ """
27
+ Validate user defined function for ops when using Numba with groupby ops.
28
+
29
+ The function signature must begin with the following arguments:
30
+
31
+ def f(values, index, ...):
32
+ ...
33
+
34
+ Parameters
35
+ ----------
36
+ func : function
37
+ user defined function to be validated
38
+
39
+ Returns
40
+ -------
41
+ None
42
+
43
+ Raises
44
+ ------
45
+ NumbaUtilError
46
+ """
47
+ if not callable(func):
48
+ raise NotImplementedError(
49
+ "Numba engine can only be used with a single function."
50
+ )
51
+ udf_signature = list(inspect.signature(func).parameters.keys())
52
+ expected_args = ["values", "index"]
53
+ min_number_args = len(expected_args)
54
+ if (
55
+ len(udf_signature) < min_number_args
56
+ or udf_signature[:min_number_args] != expected_args
57
+ ):
58
+ raise NumbaUtilError(
59
+ f"The first {min_number_args} arguments to {func.__name__} must be "
60
+ f"{expected_args}"
61
+ )
62
+
63
+
64
+ @functools.cache
65
+ def generate_numba_agg_func(
66
+ func: Callable[..., Scalar],
67
+ nopython: bool,
68
+ nogil: bool,
69
+ parallel: bool,
70
+ ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:
71
+ """
72
+ Generate a numba jitted agg function specified by values from engine_kwargs.
73
+
74
+ 1. jit the user's function
75
+ 2. Return a groupby agg function with the jitted function inline
76
+
77
+ Configurations specified in engine_kwargs apply to both the user's
78
+ function _AND_ the groupby evaluation loop.
79
+
80
+ Parameters
81
+ ----------
82
+ func : function
83
+ function to be applied to each group and will be JITed
84
+ nopython : bool
85
+ nopython to be passed into numba.jit
86
+ nogil : bool
87
+ nogil to be passed into numba.jit
88
+ parallel : bool
89
+ parallel to be passed into numba.jit
90
+
91
+ Returns
92
+ -------
93
+ Numba function
94
+ """
95
+ numba_func = jit_user_function(func)
96
+ if TYPE_CHECKING:
97
+ import numba
98
+ else:
99
+ numba = import_optional_dependency("numba")
100
+
101
+ @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
102
+ def group_agg(
103
+ values: np.ndarray,
104
+ index: np.ndarray,
105
+ begin: np.ndarray,
106
+ end: np.ndarray,
107
+ num_columns: int,
108
+ *args: Any,
109
+ ) -> np.ndarray:
110
+ assert len(begin) == len(end)
111
+ num_groups = len(begin)
112
+
113
+ result = np.empty((num_groups, num_columns))
114
+ for i in numba.prange(num_groups):
115
+ group_index = index[begin[i] : end[i]]
116
+ for j in numba.prange(num_columns):
117
+ group = values[begin[i] : end[i], j]
118
+ result[i, j] = numba_func(group, group_index, *args)
119
+ return result
120
+
121
+ return group_agg
122
+
123
+
124
+ @functools.cache
125
+ def generate_numba_transform_func(
126
+ func: Callable[..., np.ndarray],
127
+ nopython: bool,
128
+ nogil: bool,
129
+ parallel: bool,
130
+ ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:
131
+ """
132
+ Generate a numba jitted transform function specified by values from engine_kwargs.
133
+
134
+ 1. jit the user's function
135
+ 2. Return a groupby transform function with the jitted function inline
136
+
137
+ Configurations specified in engine_kwargs apply to both the user's
138
+ function _AND_ the groupby evaluation loop.
139
+
140
+ Parameters
141
+ ----------
142
+ func : function
143
+ function to be applied to each group and will be JITed
144
+ nopython : bool
145
+ nopython to be passed into numba.jit
146
+ nogil : bool
147
+ nogil to be passed into numba.jit
148
+ parallel : bool
149
+ parallel to be passed into numba.jit
150
+
151
+ Returns
152
+ -------
153
+ Numba function
154
+ """
155
+ numba_func = jit_user_function(func)
156
+ if TYPE_CHECKING:
157
+ import numba
158
+ else:
159
+ numba = import_optional_dependency("numba")
160
+
161
+ @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
162
+ def group_transform(
163
+ values: np.ndarray,
164
+ index: np.ndarray,
165
+ begin: np.ndarray,
166
+ end: np.ndarray,
167
+ num_columns: int,
168
+ *args: Any,
169
+ ) -> np.ndarray:
170
+ assert len(begin) == len(end)
171
+ num_groups = len(begin)
172
+
173
+ result = np.empty((len(values), num_columns))
174
+ for i in numba.prange(num_groups):
175
+ group_index = index[begin[i] : end[i]]
176
+ for j in numba.prange(num_columns):
177
+ group = values[begin[i] : end[i], j]
178
+ result[begin[i] : end[i], j] = numba_func(group, group_index, *args)
179
+ return result
180
+
181
+ return group_transform
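As a rough usage sketch (assuming numba is installed; the key and column names are made up for illustration), the machinery above is reached through the public ``engine="numba"`` path, and ``validate_udf`` is the reason the UDF must take ``values`` and ``index`` as its first two arguments:

```python
import numpy as np
import pandas as pd


def mean_of_last_two(values, index):
    # The first two parameters must be named `values` and `index`;
    # validate_udf rejects any other signature before jitting.
    return np.mean(values[-2:])


df = pd.DataFrame(
    {"key": ["x", "x", "y", "y", "y"], "val": [1.0, 2.0, 3.0, 4.0, 5.0]}
)

# engine_kwargs map onto the nopython/nogil/parallel flags that
# generate_numba_agg_func passes to numba.jit (and caches on).
result = df.groupby("key")["val"].agg(
    mean_of_last_two, engine="numba", engine_kwargs={"nopython": True}
)
print(result)
```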
venv/lib/python3.10/site-packages/pandas/core/groupby/ops.py ADDED
@@ -0,0 +1,1208 @@
1
+ """
2
+ Provide classes to perform the groupby aggregate operations.
3
+
4
+ These are not exposed to the user and provide implementations of the grouping
5
+ operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
6
+ are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
7
+ """
8
+ from __future__ import annotations
9
+
10
+ import collections
11
+ import functools
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Callable,
15
+ Generic,
16
+ final,
17
+ )
18
+
19
+ import numpy as np
20
+
21
+ from pandas._libs import (
22
+ NaT,
23
+ lib,
24
+ )
25
+ import pandas._libs.groupby as libgroupby
26
+ from pandas._typing import (
27
+ ArrayLike,
28
+ AxisInt,
29
+ NDFrameT,
30
+ Shape,
31
+ npt,
32
+ )
33
+ from pandas.errors import AbstractMethodError
34
+ from pandas.util._decorators import cache_readonly
35
+
36
+ from pandas.core.dtypes.cast import (
37
+ maybe_cast_pointwise_result,
38
+ maybe_downcast_to_dtype,
39
+ )
40
+ from pandas.core.dtypes.common import (
41
+ ensure_float64,
42
+ ensure_int64,
43
+ ensure_platform_int,
44
+ ensure_uint64,
45
+ is_1d_only_ea_dtype,
46
+ )
47
+ from pandas.core.dtypes.missing import (
48
+ isna,
49
+ maybe_fill,
50
+ )
51
+
52
+ from pandas.core.frame import DataFrame
53
+ from pandas.core.groupby import grouper
54
+ from pandas.core.indexes.api import (
55
+ CategoricalIndex,
56
+ Index,
57
+ MultiIndex,
58
+ ensure_index,
59
+ )
60
+ from pandas.core.series import Series
61
+ from pandas.core.sorting import (
62
+ compress_group_index,
63
+ decons_obs_group_ids,
64
+ get_flattened_list,
65
+ get_group_index,
66
+ get_group_index_sorter,
67
+ get_indexer_dict,
68
+ )
69
+
70
+ if TYPE_CHECKING:
71
+ from collections.abc import (
72
+ Hashable,
73
+ Iterator,
74
+ Sequence,
75
+ )
76
+
77
+ from pandas.core.generic import NDFrame
78
+
79
+
80
+ def check_result_array(obj, dtype) -> None:
81
+ # Our operation is supposed to be an aggregation/reduction. If
82
+ # it returns an ndarray, this likely means an invalid operation has
83
+ # been passed. See test_apply_without_aggregation, test_agg_must_agg
84
+ if isinstance(obj, np.ndarray):
85
+ if dtype != object:
86
+ # If it is object dtype, the function can be a reduction/aggregation
87
+ # and still return an ndarray e.g. test_agg_over_numpy_arrays
88
+ raise ValueError("Must produce aggregated value")
89
+
90
+
91
+ def extract_result(res):
92
+ """
93
+ Extract the result object; it might be a 0-dim ndarray,
94
+ a len-1 1-dim ndarray, or a scalar
95
+ """
96
+ if hasattr(res, "_values"):
97
+ # Preserve EA
98
+ res = res._values
99
+ if res.ndim == 1 and len(res) == 1:
100
+ # see test_agg_lambda_with_timezone, test_resampler_grouper.py::test_apply
101
+ res = res[0]
102
+ return res
103
+
104
+
105
+ class WrappedCythonOp:
106
+ """
107
+ Dispatch logic for functions defined in _libs.groupby
108
+
109
+ Parameters
110
+ ----------
111
+ kind: str
112
+ Whether the operation is an aggregate or transform.
113
+ how: str
114
+ Operation name, e.g. "mean".
115
+ has_dropped_na: bool
116
+ True precisely when dropna=True and the grouper contains a null value.
117
+ """
118
+
119
+ # Functions for which we do _not_ attempt to cast the cython result
120
+ # back to the original dtype.
121
+ cast_blocklist = frozenset(
122
+ ["any", "all", "rank", "count", "size", "idxmin", "idxmax"]
123
+ )
124
+
125
+ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
126
+ self.kind = kind
127
+ self.how = how
128
+ self.has_dropped_na = has_dropped_na
129
+
130
+ _CYTHON_FUNCTIONS: dict[str, dict] = {
131
+ "aggregate": {
132
+ "any": functools.partial(libgroupby.group_any_all, val_test="any"),
133
+ "all": functools.partial(libgroupby.group_any_all, val_test="all"),
134
+ "sum": "group_sum",
135
+ "prod": "group_prod",
136
+ "idxmin": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmin"),
137
+ "idxmax": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmax"),
138
+ "min": "group_min",
139
+ "max": "group_max",
140
+ "mean": "group_mean",
141
+ "median": "group_median_float64",
142
+ "var": "group_var",
143
+ "std": functools.partial(libgroupby.group_var, name="std"),
144
+ "sem": functools.partial(libgroupby.group_var, name="sem"),
145
+ "skew": "group_skew",
146
+ "first": "group_nth",
147
+ "last": "group_last",
148
+ "ohlc": "group_ohlc",
149
+ },
150
+ "transform": {
151
+ "cumprod": "group_cumprod",
152
+ "cumsum": "group_cumsum",
153
+ "cummin": "group_cummin",
154
+ "cummax": "group_cummax",
155
+ "rank": "group_rank",
156
+ },
157
+ }
158
+
159
+ _cython_arity = {"ohlc": 4} # OHLC
160
+
161
+ @classmethod
162
+ def get_kind_from_how(cls, how: str) -> str:
163
+ if how in cls._CYTHON_FUNCTIONS["aggregate"]:
164
+ return "aggregate"
165
+ return "transform"
166
+
167
+ # Note: we make this a classmethod and pass kind+how so that caching
168
+ # works at the class level and not the instance level
169
+ @classmethod
170
+ @functools.cache
171
+ def _get_cython_function(
172
+ cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
173
+ ):
174
+ dtype_str = dtype.name
175
+ ftype = cls._CYTHON_FUNCTIONS[kind][how]
176
+
177
+ # see if there is a fused-type version of function
178
+ # only valid for numeric
179
+ if callable(ftype):
180
+ f = ftype
181
+ else:
182
+ f = getattr(libgroupby, ftype)
183
+ if is_numeric:
184
+ return f
185
+ elif dtype == np.dtype(object):
186
+ if how in ["median", "cumprod"]:
187
+ # no fused types -> no __signatures__
188
+ raise NotImplementedError(
189
+ f"function is not implemented for this dtype: "
190
+ f"[how->{how},dtype->{dtype_str}]"
191
+ )
192
+ elif how in ["std", "sem", "idxmin", "idxmax"]:
193
+ # We have a partial object that does not have __signatures__
194
+ return f
195
+ elif how == "skew":
196
+ # _get_cython_vals will convert to float64
197
+ pass
198
+ elif "object" not in f.__signatures__:
199
+ # raise NotImplementedError here rather than TypeError later
200
+ raise NotImplementedError(
201
+ f"function is not implemented for this dtype: "
202
+ f"[how->{how},dtype->{dtype_str}]"
203
+ )
204
+ return f
205
+ else:
206
+ raise NotImplementedError(
207
+ "This should not be reached. Please report a bug at "
208
+ "github.com/pandas-dev/pandas/",
209
+ dtype,
210
+ )
211
+
212
+ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:
213
+ """
214
+ Cast numeric dtypes to float64 for functions that only support that.
215
+
216
+ Parameters
217
+ ----------
218
+ values : np.ndarray
219
+
220
+ Returns
221
+ -------
222
+ values : np.ndarray
223
+ """
224
+ how = self.how
225
+
226
+ if how in ["median", "std", "sem", "skew"]:
227
+ # median only has a float64 implementation
228
+ # We should only get here with is_numeric, as non-numeric cases
229
+ # should raise in _get_cython_function
230
+ values = ensure_float64(values)
231
+
232
+ elif values.dtype.kind in "iu":
233
+ if how in ["var", "mean"] or (
234
+ self.kind == "transform" and self.has_dropped_na
235
+ ):
236
+ # has_dropped_na check need for test_null_group_str_transformer
237
+ # result may still include NaN, so we have to cast
238
+ values = ensure_float64(values)
239
+
240
+ elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]:
241
+ # Avoid overflow during group op
242
+ if values.dtype.kind == "i":
243
+ values = ensure_int64(values)
244
+ else:
245
+ values = ensure_uint64(values)
246
+
247
+ return values
248
+
249
+ def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
250
+ how = self.how
251
+ kind = self.kind
252
+
253
+ arity = self._cython_arity.get(how, 1)
254
+
255
+ out_shape: Shape
256
+ if how == "ohlc":
257
+ out_shape = (ngroups, arity)
258
+ elif arity > 1:
259
+ raise NotImplementedError(
260
+ "arity of more than 1 is not supported for the 'how' argument"
261
+ )
262
+ elif kind == "transform":
263
+ out_shape = values.shape
264
+ else:
265
+ out_shape = (ngroups,) + values.shape[1:]
266
+ return out_shape
267
+
268
+ def _get_out_dtype(self, dtype: np.dtype) -> np.dtype:
269
+ how = self.how
270
+
271
+ if how == "rank":
272
+ out_dtype = "float64"
273
+ elif how in ["idxmin", "idxmax"]:
274
+ # The Cython implementation only produces the row number; we'll take
275
+ # from the index using this in post processing
276
+ out_dtype = "intp"
277
+ else:
278
+ if dtype.kind in "iufcb":
279
+ out_dtype = f"{dtype.kind}{dtype.itemsize}"
280
+ else:
281
+ out_dtype = "object"
282
+ return np.dtype(out_dtype)
283
+
284
+ def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
285
+ """
286
+ Get the desired dtype of a result based on the
287
+ input dtype and how it was computed.
288
+
289
+ Parameters
290
+ ----------
291
+ dtype : np.dtype
292
+
293
+ Returns
294
+ -------
295
+ np.dtype
296
+ The desired dtype of the result.
297
+ """
298
+ how = self.how
299
+
300
+ if how in ["sum", "cumsum", "prod", "cumprod"]:
301
+ if dtype == np.dtype(bool):
302
+ return np.dtype(np.int64)
303
+ elif how in ["mean", "median", "var", "std", "sem"]:
304
+ if dtype.kind in "fc":
305
+ return dtype
306
+ elif dtype.kind in "iub":
307
+ return np.dtype(np.float64)
308
+ return dtype
309
+
310
+ @final
311
+ def _cython_op_ndim_compat(
312
+ self,
313
+ values: np.ndarray,
314
+ *,
315
+ min_count: int,
316
+ ngroups: int,
317
+ comp_ids: np.ndarray,
318
+ mask: npt.NDArray[np.bool_] | None = None,
319
+ result_mask: npt.NDArray[np.bool_] | None = None,
320
+ **kwargs,
321
+ ) -> np.ndarray:
322
+ if values.ndim == 1:
323
+ # expand to 2d, dispatch, then squeeze if appropriate
324
+ values2d = values[None, :]
325
+ if mask is not None:
326
+ mask = mask[None, :]
327
+ if result_mask is not None:
328
+ result_mask = result_mask[None, :]
329
+ res = self._call_cython_op(
330
+ values2d,
331
+ min_count=min_count,
332
+ ngroups=ngroups,
333
+ comp_ids=comp_ids,
334
+ mask=mask,
335
+ result_mask=result_mask,
336
+ **kwargs,
337
+ )
338
+ if res.shape[0] == 1:
339
+ return res[0]
340
+
341
+ # otherwise we have OHLC
342
+ return res.T
343
+
344
+ return self._call_cython_op(
345
+ values,
346
+ min_count=min_count,
347
+ ngroups=ngroups,
348
+ comp_ids=comp_ids,
349
+ mask=mask,
350
+ result_mask=result_mask,
351
+ **kwargs,
352
+ )
353
+
354
+ @final
355
+ def _call_cython_op(
356
+ self,
357
+ values: np.ndarray, # np.ndarray[ndim=2]
358
+ *,
359
+ min_count: int,
360
+ ngroups: int,
361
+ comp_ids: np.ndarray,
362
+ mask: npt.NDArray[np.bool_] | None,
363
+ result_mask: npt.NDArray[np.bool_] | None,
364
+ **kwargs,
365
+ ) -> np.ndarray: # np.ndarray[ndim=2]
366
+ orig_values = values
367
+
368
+ dtype = values.dtype
369
+ is_numeric = dtype.kind in "iufcb"
370
+
371
+ is_datetimelike = dtype.kind in "mM"
372
+
373
+ if is_datetimelike:
374
+ values = values.view("int64")
375
+ is_numeric = True
376
+ elif dtype.kind == "b":
377
+ values = values.view("uint8")
378
+ if values.dtype == "float16":
379
+ values = values.astype(np.float32)
380
+
381
+ if self.how in ["any", "all"]:
382
+ if mask is None:
383
+ mask = isna(values)
384
+ if dtype == object:
385
+ if kwargs["skipna"]:
386
+ # GH#37501: don't raise on pd.NA when skipna=True
387
+ if mask.any():
388
+ # mask on original values computed separately
389
+ values = values.copy()
390
+ values[mask] = True
391
+ values = values.astype(bool, copy=False).view(np.int8)
392
+ is_numeric = True
393
+
394
+ values = values.T
395
+ if mask is not None:
396
+ mask = mask.T
397
+ if result_mask is not None:
398
+ result_mask = result_mask.T
399
+
400
+ out_shape = self._get_output_shape(ngroups, values)
401
+ func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric)
402
+ values = self._get_cython_vals(values)
403
+ out_dtype = self._get_out_dtype(values.dtype)
404
+
405
+ result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
406
+ if self.kind == "aggregate":
407
+ counts = np.zeros(ngroups, dtype=np.int64)
408
+ if self.how in [
409
+ "idxmin",
410
+ "idxmax",
411
+ "min",
412
+ "max",
413
+ "mean",
414
+ "last",
415
+ "first",
416
+ "sum",
417
+ ]:
418
+ func(
419
+ out=result,
420
+ counts=counts,
421
+ values=values,
422
+ labels=comp_ids,
423
+ min_count=min_count,
424
+ mask=mask,
425
+ result_mask=result_mask,
426
+ is_datetimelike=is_datetimelike,
427
+ **kwargs,
428
+ )
429
+ elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]:
430
+ if self.how in ["std", "sem"]:
431
+ kwargs["is_datetimelike"] = is_datetimelike
432
+ func(
433
+ result,
434
+ counts,
435
+ values,
436
+ comp_ids,
437
+ min_count=min_count,
438
+ mask=mask,
439
+ result_mask=result_mask,
440
+ **kwargs,
441
+ )
442
+ elif self.how in ["any", "all"]:
443
+ func(
444
+ out=result,
445
+ values=values,
446
+ labels=comp_ids,
447
+ mask=mask,
448
+ result_mask=result_mask,
449
+ **kwargs,
450
+ )
451
+ result = result.astype(bool, copy=False)
452
+ elif self.how in ["skew"]:
453
+ func(
454
+ out=result,
455
+ counts=counts,
456
+ values=values,
457
+ labels=comp_ids,
458
+ mask=mask,
459
+ result_mask=result_mask,
460
+ **kwargs,
461
+ )
462
+ if dtype == object:
463
+ result = result.astype(object)
464
+
465
+ else:
466
+ raise NotImplementedError(f"{self.how} is not implemented")
467
+ else:
468
+ # TODO: min_count
469
+ if self.how != "rank":
470
+ # TODO: should rank take result_mask?
471
+ kwargs["result_mask"] = result_mask
472
+ func(
473
+ out=result,
474
+ values=values,
475
+ labels=comp_ids,
476
+ ngroups=ngroups,
477
+ is_datetimelike=is_datetimelike,
478
+ mask=mask,
479
+ **kwargs,
480
+ )
481
+
482
+ if self.kind == "aggregate" and self.how not in ["idxmin", "idxmax"]:
483
+ # i.e. counts is defined. Locations where count<min_count
484
+ # need to have the result set to np.nan, which may require casting,
485
+ # see GH#40767. For idxmin/idxmax is handled specially via post-processing
486
+ if result.dtype.kind in "iu" and not is_datetimelike:
487
+ # if the op keeps the int dtypes, we have to use 0
488
+ cutoff = max(0 if self.how in ["sum", "prod"] else 1, min_count)
489
+ empty_groups = counts < cutoff
490
+ if empty_groups.any():
491
+ if result_mask is not None:
492
+ assert result_mask[empty_groups].all()
493
+ else:
494
+ # Note: this conversion could be lossy, see GH#40767
495
+ result = result.astype("float64")
496
+ result[empty_groups] = np.nan
497
+
498
+ result = result.T
499
+
500
+ if self.how not in self.cast_blocklist:
501
+ # e.g. if we are int64 and need to restore to datetime64/timedelta64
502
+ # "rank" is the only member of cast_blocklist we get here
503
+ # Casting only needed for float16, bool, datetimelike,
504
+ # and self.how in ["sum", "prod", "ohlc", "cumprod"]
505
+ res_dtype = self._get_result_dtype(orig_values.dtype)
506
+ op_result = maybe_downcast_to_dtype(result, res_dtype)
507
+ else:
508
+ op_result = result
509
+
510
+ return op_result
511
+
512
+ @final
513
+ def _validate_axis(self, axis: AxisInt, values: ArrayLike) -> None:
514
+ if values.ndim > 2:
515
+ raise NotImplementedError("number of dimensions is currently limited to 2")
516
+ if values.ndim == 2:
517
+ assert axis == 1, axis
518
+ elif not is_1d_only_ea_dtype(values.dtype):
519
+ # Note: it is *not* the case that axis is always 0 for 1-dim values,
520
+ # as we can have 1D ExtensionArrays that we need to treat as 2D
521
+ assert axis == 0
522
+
523
+ @final
524
+ def cython_operation(
525
+ self,
526
+ *,
527
+ values: ArrayLike,
528
+ axis: AxisInt,
529
+ min_count: int = -1,
530
+ comp_ids: np.ndarray,
531
+ ngroups: int,
532
+ **kwargs,
533
+ ) -> ArrayLike:
534
+ """
535
+ Call our cython function, with appropriate pre- and post- processing.
536
+ """
537
+ self._validate_axis(axis, values)
538
+
539
+ if not isinstance(values, np.ndarray):
540
+ # i.e. ExtensionArray
541
+ return values._groupby_op(
542
+ how=self.how,
543
+ has_dropped_na=self.has_dropped_na,
544
+ min_count=min_count,
545
+ ngroups=ngroups,
546
+ ids=comp_ids,
547
+ **kwargs,
548
+ )
549
+
550
+ return self._cython_op_ndim_compat(
551
+ values,
552
+ min_count=min_count,
553
+ ngroups=ngroups,
554
+ comp_ids=comp_ids,
555
+ mask=None,
556
+ **kwargs,
557
+ )
558
+
559
+
560
+ class BaseGrouper:
561
+ """
562
+ This is an internal Grouper class, which actually holds
563
+ the generated groups
564
+
565
+ Parameters
566
+ ----------
567
+ axis : Index
568
+ groupings : Sequence[Grouping]
569
+ all the grouping instances to handle in this grouper
570
+ for example for grouper list to groupby, need to pass the list
571
+ sort : bool, default True
572
+ whether this grouper will give sorted result or not
573
+
574
+ """
575
+
576
+ axis: Index
577
+
578
+ def __init__(
579
+ self,
580
+ axis: Index,
581
+ groupings: Sequence[grouper.Grouping],
582
+ sort: bool = True,
583
+ dropna: bool = True,
584
+ ) -> None:
585
+ assert isinstance(axis, Index), axis
586
+
587
+ self.axis = axis
588
+ self._groupings: list[grouper.Grouping] = list(groupings)
589
+ self._sort = sort
590
+ self.dropna = dropna
591
+
592
+ @property
593
+ def groupings(self) -> list[grouper.Grouping]:
594
+ return self._groupings
595
+
596
+ @property
597
+ def shape(self) -> Shape:
598
+ return tuple(ping.ngroups for ping in self.groupings)
599
+
600
+ def __iter__(self) -> Iterator[Hashable]:
601
+ return iter(self.indices)
602
+
603
+ @property
604
+ def nkeys(self) -> int:
605
+ return len(self.groupings)
606
+
607
+ def get_iterator(
608
+ self, data: NDFrameT, axis: AxisInt = 0
609
+ ) -> Iterator[tuple[Hashable, NDFrameT]]:
610
+ """
611
+ Groupby iterator
612
+
613
+ Returns
614
+ -------
615
+ Generator yielding sequence of (name, subsetted object)
616
+ for each group
617
+ """
618
+ splitter = self._get_splitter(data, axis=axis)
619
+ keys = self.group_keys_seq
620
+ yield from zip(keys, splitter)
621
+
622
+ @final
623
+ def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:
624
+ """
625
+ Returns
626
+ -------
627
+ Generator yielding subsetted objects
628
+ """
629
+ ids, _, ngroups = self.group_info
630
+ return _get_splitter(
631
+ data,
632
+ ids,
633
+ ngroups,
634
+ sorted_ids=self._sorted_ids,
635
+ sort_idx=self._sort_idx,
636
+ axis=axis,
637
+ )
638
+
639
+ @final
640
+ @cache_readonly
641
+ def group_keys_seq(self):
642
+ if len(self.groupings) == 1:
643
+ return self.levels[0]
644
+ else:
645
+ ids, _, ngroups = self.group_info
646
+
647
+ # provide "flattened" iterator for multi-group setting
648
+ return get_flattened_list(ids, ngroups, self.levels, self.codes)
649
+
650
+ @cache_readonly
651
+ def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
652
+ """dict {group name -> group indices}"""
653
+ if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
654
+ # This shows unused categories in indices GH#38642
655
+ return self.groupings[0].indices
656
+ codes_list = [ping.codes for ping in self.groupings]
657
+ keys = [ping._group_index for ping in self.groupings]
658
+ return get_indexer_dict(codes_list, keys)
659
+
660
+ @final
661
+ def result_ilocs(self) -> npt.NDArray[np.intp]:
662
+ """
663
+ Get the original integer locations of result_index in the input.
664
+ """
665
+ # Original indices are where group_index would go via sorting.
666
+ # But when dropna is true, we need to remove null values while accounting for
667
+ # any gaps that then occur because of them.
668
+ group_index = get_group_index(
669
+ self.codes, self.shape, sort=self._sort, xnull=True
670
+ )
671
+ group_index, _ = compress_group_index(group_index, sort=self._sort)
672
+
673
+ if self.has_dropped_na:
674
+ mask = np.where(group_index >= 0)
675
+ # Count how many gaps are caused by previous null values for each position
676
+ null_gaps = np.cumsum(group_index == -1)[mask]
677
+ group_index = group_index[mask]
678
+
679
+ result = get_group_index_sorter(group_index, self.ngroups)
680
+
681
+ if self.has_dropped_na:
682
+ # Shift by the number of prior null gaps
683
+ result += np.take(null_gaps, result)
684
+
685
+ return result
686
+
687
+ @final
688
+ @property
689
+ def codes(self) -> list[npt.NDArray[np.signedinteger]]:
690
+ return [ping.codes for ping in self.groupings]
691
+
692
+ @property
693
+ def levels(self) -> list[Index]:
694
+ return [ping._group_index for ping in self.groupings]
695
+
696
+ @property
697
+ def names(self) -> list[Hashable]:
698
+ return [ping.name for ping in self.groupings]
699
+
700
+ @final
701
+ def size(self) -> Series:
702
+ """
703
+ Compute group sizes.
704
+ """
705
+ ids, _, ngroups = self.group_info
706
+ out: np.ndarray | list
707
+ if ngroups:
708
+ out = np.bincount(ids[ids != -1], minlength=ngroups)
709
+ else:
710
+ out = []
711
+ return Series(out, index=self.result_index, dtype="int64", copy=False)
712
+
713
+ @cache_readonly
714
+ def groups(self) -> dict[Hashable, np.ndarray]:
715
+ """dict {group name -> group labels}"""
716
+ if len(self.groupings) == 1:
717
+ return self.groupings[0].groups
718
+ else:
719
+ to_groupby = []
720
+ for ping in self.groupings:
721
+ gv = ping.grouping_vector
722
+ if not isinstance(gv, BaseGrouper):
723
+ to_groupby.append(gv)
724
+ else:
725
+ to_groupby.append(gv.groupings[0].grouping_vector)
726
+ index = MultiIndex.from_arrays(to_groupby)
727
+ return self.axis.groupby(index)
728
+
729
+ @final
730
+ @cache_readonly
731
+ def is_monotonic(self) -> bool:
732
+ # return if my group orderings are monotonic
733
+ return Index(self.group_info[0]).is_monotonic_increasing
734
+
735
+ @final
736
+ @cache_readonly
737
+ def has_dropped_na(self) -> bool:
738
+ """
739
+ Whether grouper has null value(s) that are dropped.
740
+ """
741
+ return bool((self.group_info[0] < 0).any())
742
+
743
+ @cache_readonly
744
+ def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
745
+ comp_ids, obs_group_ids = self._get_compressed_codes()
746
+
747
+ ngroups = len(obs_group_ids)
748
+ comp_ids = ensure_platform_int(comp_ids)
749
+
750
+ return comp_ids, obs_group_ids, ngroups
751
+
752
+ @cache_readonly
753
+ def codes_info(self) -> npt.NDArray[np.intp]:
754
+ # return the codes of items in original grouped axis
755
+ ids, _, _ = self.group_info
756
+ return ids
757
+
758
+ @final
759
+ def _get_compressed_codes(
760
+ self,
761
+ ) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]:
762
+ # The first returned ndarray may have any signed integer dtype
763
+ if len(self.groupings) > 1:
764
+ group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)
765
+ return compress_group_index(group_index, sort=self._sort)
766
+ # FIXME: compress_group_index's second return value is int64, not intp
767
+
768
+ ping = self.groupings[0]
769
+ return ping.codes, np.arange(len(ping._group_index), dtype=np.intp)
770
+
771
+ @final
772
+ @cache_readonly
773
+ def ngroups(self) -> int:
774
+ return len(self.result_index)
775
+
776
+ @property
777
+ def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:
778
+ codes = self.codes
779
+ ids, obs_ids, _ = self.group_info
780
+ return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)
781
+
782
+ @cache_readonly
783
+ def result_index(self) -> Index:
784
+ if len(self.groupings) == 1:
785
+ return self.groupings[0]._result_index.rename(self.names[0])
786
+
787
+ codes = self.reconstructed_codes
788
+ levels = [ping._result_index for ping in self.groupings]
789
+ return MultiIndex(
790
+ levels=levels, codes=codes, verify_integrity=False, names=self.names
791
+ )
792
+
793
+ @final
794
+ def get_group_levels(self) -> list[ArrayLike]:
795
+ # Note: only called from _insert_inaxis_grouper, which
796
+ # is only called for BaseGrouper, never for BinGrouper
797
+ if len(self.groupings) == 1:
798
+ return [self.groupings[0]._group_arraylike]
799
+
800
+ name_list = []
801
+ for ping, codes in zip(self.groupings, self.reconstructed_codes):
802
+ codes = ensure_platform_int(codes)
803
+ levels = ping._group_arraylike.take(codes)
804
+
805
+ name_list.append(levels)
806
+
807
+ return name_list
808
+
809
+ # ------------------------------------------------------------
810
+ # Aggregation functions
811
+
812
+ @final
813
+ def _cython_operation(
814
+ self,
815
+ kind: str,
816
+ values,
817
+ how: str,
818
+ axis: AxisInt,
819
+ min_count: int = -1,
820
+ **kwargs,
821
+ ) -> ArrayLike:
822
+ """
823
+ Returns the values of a cython operation.
824
+ """
825
+ assert kind in ["transform", "aggregate"]
826
+
827
+ cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na)
828
+
829
+ ids, _, _ = self.group_info
830
+ ngroups = self.ngroups
831
+ return cy_op.cython_operation(
832
+ values=values,
833
+ axis=axis,
834
+ min_count=min_count,
835
+ comp_ids=ids,
836
+ ngroups=ngroups,
837
+ **kwargs,
838
+ )
839
+
840
+ @final
841
+ def agg_series(
842
+ self, obj: Series, func: Callable, preserve_dtype: bool = False
843
+ ) -> ArrayLike:
844
+ """
845
+ Parameters
846
+ ----------
847
+ obj : Series
848
+ func : function taking a Series and returning a scalar-like
849
+ preserve_dtype : bool
850
+ Whether the aggregation is known to be dtype-preserving.
851
+
852
+ Returns
853
+ -------
854
+ np.ndarray or ExtensionArray
855
+ """
856
+
857
+ if not isinstance(obj._values, np.ndarray):
858
+ # we can preserve a little bit more aggressively with EA dtype
859
+ # because maybe_cast_pointwise_result will do a try/except
860
+ # with _from_sequence. NB we are assuming here that _from_sequence
861
+ # is sufficiently strict that it casts appropriately.
862
+ preserve_dtype = True
863
+
864
+ result = self._aggregate_series_pure_python(obj, func)
865
+
866
+ npvalues = lib.maybe_convert_objects(result, try_float=False)
867
+ if preserve_dtype:
868
+ out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
869
+ else:
870
+ out = npvalues
871
+ return out
872
+
873
+ @final
874
+ def _aggregate_series_pure_python(
875
+ self, obj: Series, func: Callable
876
+ ) -> npt.NDArray[np.object_]:
877
+ _, _, ngroups = self.group_info
878
+
879
+ result = np.empty(ngroups, dtype="O")
880
+ initialized = False
881
+
882
+ splitter = self._get_splitter(obj, axis=0)
883
+
884
+ for i, group in enumerate(splitter):
885
+ res = func(group)
886
+ res = extract_result(res)
887
+
888
+ if not initialized:
889
+ # We only do this validation on the first iteration
890
+ check_result_array(res, group.dtype)
891
+ initialized = True
892
+
893
+ result[i] = res
894
+
895
+ return result
896
+
897
+ @final
898
+ def apply_groupwise(
899
+ self, f: Callable, data: DataFrame | Series, axis: AxisInt = 0
900
+ ) -> tuple[list, bool]:
901
+ mutated = False
902
+ splitter = self._get_splitter(data, axis=axis)
903
+ group_keys = self.group_keys_seq
904
+ result_values = []
905
+
906
+ # This calls DataSplitter.__iter__
907
+ zipped = zip(group_keys, splitter)
908
+
909
+ for key, group in zipped:
910
+ # Pinning name is needed for
911
+ # test_group_apply_once_per_group,
912
+ # test_inconsistent_return_type, test_set_group_name,
913
+ # test_group_name_available_in_inference_pass,
914
+ # test_groupby_multi_timezone
915
+ object.__setattr__(group, "name", key)
916
+
917
+ # group might be modified
918
+ group_axes = group.axes
919
+ res = f(group)
920
+ if not mutated and not _is_indexed_like(res, group_axes, axis):
921
+ mutated = True
922
+ result_values.append(res)
923
+ # getattr pattern for __name__ is needed for functools.partial objects
924
+ if len(group_keys) == 0 and getattr(f, "__name__", None) in [
925
+ "skew",
926
+ "sum",
927
+ "prod",
928
+ ]:
929
+ # If group_keys is empty, then no function calls have been made,
930
+ # so we will not have raised even if this is an invalid dtype.
931
+ # So do one dummy call here to raise appropriate TypeError.
932
+ f(data.iloc[:0])
933
+
934
+ return result_values, mutated
935
+
936
+ # ------------------------------------------------------------
937
+ # Methods for sorting subsets of our GroupBy's object
938
+
939
+ @final
940
+ @cache_readonly
941
+ def _sort_idx(self) -> npt.NDArray[np.intp]:
942
+ # Counting sort indexer
943
+ ids, _, ngroups = self.group_info
944
+ return get_group_index_sorter(ids, ngroups)
945
+
946
+ @final
947
+ @cache_readonly
948
+ def _sorted_ids(self) -> npt.NDArray[np.intp]:
949
+ ids, _, _ = self.group_info
950
+ return ids.take(self._sort_idx)
951
+
952
+
953
+ class BinGrouper(BaseGrouper):
954
+ """
955
+ This is an internal Grouper class
956
+
957
+ Parameters
958
+ ----------
959
+ bins : the positions in the grouped axis at which each bin ends
960
+ binlabels : the label list
961
+ indexer : np.ndarray[np.intp], optional
962
+ the indexer created by Grouper
963
+ some groupers (TimeGrouper) will sort their axis and their
964
+ group_info is also sorted, so the indexer is needed to reorder
965
+
966
+ Examples
967
+ --------
968
+ bins: [2, 4, 6, 8, 10]
969
+ binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
970
+ '2005-01-05', '2005-01-07', '2005-01-09'],
971
+ dtype='datetime64[ns]', freq='2D')
972
+
973
+ the group_info, which contains the label of each item in the grouped
974
+ axis, the index of each label in the label list, and the number of groups, is
975
+
976
+ (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)
977
+
978
+ means that the grouped axis has 10 items and can be grouped into 5
979
+ labels; the first and second items belong to the first label, the
980
+ third and fourth items belong to the second label, and so on
981
+
982
+ """
983
+
984
+ bins: npt.NDArray[np.int64]
985
+ binlabels: Index
986
+
987
+ def __init__(
988
+ self,
989
+ bins,
990
+ binlabels,
991
+ indexer=None,
992
+ ) -> None:
993
+ self.bins = ensure_int64(bins)
994
+ self.binlabels = ensure_index(binlabels)
995
+ self.indexer = indexer
996
+
997
+ # These lengths must match, otherwise we could call agg_series
998
+ # with empty self.bins, which would raise later.
999
+ assert len(self.binlabels) == len(self.bins)
1000
+
1001
+ @cache_readonly
1002
+ def groups(self):
1003
+ """dict {group name -> group labels}"""
1004
+ # this is mainly for compat
1005
+ # GH 3881
1006
+ result = {
1007
+ key: value
1008
+ for key, value in zip(self.binlabels, self.bins)
1009
+ if key is not NaT
1010
+ }
1011
+ return result
1012
+
1013
+ @property
1014
+ def nkeys(self) -> int:
1015
+ # still matches len(self.groupings), but we can hard-code
1016
+ return 1
1017
+
1018
+ @cache_readonly
1019
+ def codes_info(self) -> npt.NDArray[np.intp]:
1020
+ # return the codes of items in original grouped axis
1021
+ ids, _, _ = self.group_info
1022
+ if self.indexer is not None:
1023
+ sorter = np.lexsort((ids, self.indexer))
1024
+ ids = ids[sorter]
1025
+ return ids
1026
+
1027
+ def get_iterator(self, data: NDFrame, axis: AxisInt = 0):
1028
+ """
1029
+ Groupby iterator
1030
+
1031
+ Returns
1032
+ -------
1033
+ Generator yielding sequence of (name, subsetted object)
1034
+ for each group
1035
+ """
1036
+ if axis == 0:
1037
+ slicer = lambda start, edge: data.iloc[start:edge]
1038
+ else:
1039
+ slicer = lambda start, edge: data.iloc[:, start:edge]
1040
+
1041
+ length = len(data.axes[axis])
1042
+
1043
+ start = 0
1044
+ for edge, label in zip(self.bins, self.binlabels):
1045
+ if label is not NaT:
1046
+ yield label, slicer(start, edge)
1047
+ start = edge
1048
+
1049
+ if start < length:
1050
+ yield self.binlabels[-1], slicer(start, None)
1051
+
1052
+ @cache_readonly
1053
+ def indices(self):
1054
+ indices = collections.defaultdict(list)
1055
+
1056
+ i = 0
1057
+ for label, bin in zip(self.binlabels, self.bins):
1058
+ if i < bin:
1059
+ if label is not NaT:
1060
+ indices[label] = list(range(i, bin))
1061
+ i = bin
1062
+ return indices
1063
+
1064
+ @cache_readonly
1065
+ def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
1066
+ ngroups = self.ngroups
1067
+ obs_group_ids = np.arange(ngroups, dtype=np.intp)
1068
+ rep = np.diff(np.r_[0, self.bins])
1069
+
1070
+ rep = ensure_platform_int(rep)
1071
+ if ngroups == len(self.bins):
1072
+ comp_ids = np.repeat(np.arange(ngroups), rep)
1073
+ else:
1074
+ comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
1075
+
1076
+ return (
1077
+ ensure_platform_int(comp_ids),
1078
+ obs_group_ids,
1079
+ ngroups,
1080
+ )
1081
+
1082
+ @cache_readonly
1083
+ def reconstructed_codes(self) -> list[np.ndarray]:
1084
+ # get unique result indices, and prepend 0 as groupby starts from the first
1085
+ return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
1086
+
1087
+ @cache_readonly
1088
+ def result_index(self) -> Index:
1089
+ if len(self.binlabels) != 0 and isna(self.binlabels[0]):
1090
+ return self.binlabels[1:]
1091
+
1092
+ return self.binlabels
1093
+
1094
+ @property
1095
+ def levels(self) -> list[Index]:
1096
+ return [self.binlabels]
1097
+
1098
+ @property
1099
+ def names(self) -> list[Hashable]:
1100
+ return [self.binlabels.name]
1101
+
1102
+ @property
1103
+ def groupings(self) -> list[grouper.Grouping]:
1104
+ lev = self.binlabels
1105
+ codes = self.group_info[0]
1106
+ labels = lev.take(codes)
1107
+ ping = grouper.Grouping(
1108
+ labels, labels, in_axis=False, level=None, uniques=lev._values
1109
+ )
1110
+ return [ping]
1111
+
1112
+
1113
+ def _is_indexed_like(obj, axes, axis: AxisInt) -> bool:
1114
+ if isinstance(obj, Series):
1115
+ if len(axes) > 1:
1116
+ return False
1117
+ return obj.axes[axis].equals(axes[axis])
1118
+ elif isinstance(obj, DataFrame):
1119
+ return obj.axes[axis].equals(axes[axis])
1120
+
1121
+ return False
1122
+
1123
+
1124
+ # ----------------------------------------------------------------------
1125
+ # Splitting / application
1126
+
1127
+
1128
+ class DataSplitter(Generic[NDFrameT]):
1129
+ def __init__(
1130
+ self,
1131
+ data: NDFrameT,
1132
+ labels: npt.NDArray[np.intp],
1133
+ ngroups: int,
1134
+ *,
1135
+ sort_idx: npt.NDArray[np.intp],
1136
+ sorted_ids: npt.NDArray[np.intp],
1137
+ axis: AxisInt = 0,
1138
+ ) -> None:
1139
+ self.data = data
1140
+ self.labels = ensure_platform_int(labels) # _should_ already be np.intp
1141
+ self.ngroups = ngroups
1142
+
1143
+ self._slabels = sorted_ids
1144
+ self._sort_idx = sort_idx
1145
+
1146
+ self.axis = axis
1147
+ assert isinstance(axis, int), axis
1148
+
1149
+ def __iter__(self) -> Iterator:
1150
+ sdata = self._sorted_data
1151
+
1152
+ if self.ngroups == 0:
1153
+ # we are inside a generator, rather than raise StopIteration
1154
+ # we merely return to signal the end
1155
+ return
1156
+
1157
+ starts, ends = lib.generate_slices(self._slabels, self.ngroups)
1158
+
1159
+ for start, end in zip(starts, ends):
1160
+ yield self._chop(sdata, slice(start, end))
1161
+
1162
+ @cache_readonly
1163
+ def _sorted_data(self) -> NDFrameT:
1164
+ return self.data.take(self._sort_idx, axis=self.axis)
1165
+
1166
+ def _chop(self, sdata, slice_obj: slice) -> NDFrame:
1167
+ raise AbstractMethodError(self)
1168
+
1169
+
1170
+ class SeriesSplitter(DataSplitter):
1171
+ def _chop(self, sdata: Series, slice_obj: slice) -> Series:
1172
+ # fastpath equivalent to `sdata.iloc[slice_obj]`
1173
+ mgr = sdata._mgr.get_slice(slice_obj)
1174
+ ser = sdata._constructor_from_mgr(mgr, axes=mgr.axes)
1175
+ ser._name = sdata.name
1176
+ return ser.__finalize__(sdata, method="groupby")
1177
+
1178
+
1179
+ class FrameSplitter(DataSplitter):
1180
+ def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
1181
+ # Fastpath equivalent to:
1182
+ # if self.axis == 0:
1183
+ # return sdata.iloc[slice_obj]
1184
+ # else:
1185
+ # return sdata.iloc[:, slice_obj]
1186
+ mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
1187
+ df = sdata._constructor_from_mgr(mgr, axes=mgr.axes)
1188
+ return df.__finalize__(sdata, method="groupby")
1189
+
1190
+
1191
+ def _get_splitter(
1192
+ data: NDFrame,
1193
+ labels: npt.NDArray[np.intp],
1194
+ ngroups: int,
1195
+ *,
1196
+ sort_idx: npt.NDArray[np.intp],
1197
+ sorted_ids: npt.NDArray[np.intp],
1198
+ axis: AxisInt = 0,
1199
+ ) -> DataSplitter:
1200
+ if isinstance(data, Series):
1201
+ klass: type[DataSplitter] = SeriesSplitter
1202
+ else:
1203
+ # i.e. DataFrame
1204
+ klass = FrameSplitter
1205
+
1206
+ return klass(
1207
+ data, labels, ngroups, sort_idx=sort_idx, sorted_ids=sorted_ids, axis=axis
1208
+ )
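The classes above are internal, but some of the rules they encode are easy to observe from the public API. A small, hedged sketch (illustrative column names only) of the dtype widening described in ``WrappedCythonOp._get_result_dtype``, where boolean sums come back as int64 and integer means as float64:

```python
import pandas as pd

df = pd.DataFrame(
    {"key": ["a", "a", "b"], "flag": [True, False, True], "n": [1, 2, 3]}
)
grouped = df.groupby("key")

# Bool inputs to sum are widened to int64 (the "sum" branch above),
# and integer inputs to mean are returned as float64 (the "mean" branch).
print(grouped["flag"].sum().dtype)  # int64
print(grouped["n"].mean().dtype)    # float64
```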
venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc ADDED
Binary file (39.1 kB). View file
 
venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc ADDED
Binary file (59.5 kB). View file