applied-ai-018 commited on
Commit
1a3e5ab
·
verified ·
1 Parent(s): 3db66cc

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_180_mp_rank_00_optim_states.pt +3 -0
  2. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_46_mp_rank_00_optim_states.pt +3 -0
  3. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_90_mp_rank_03_optim_states.pt +3 -0
  4. venv/lib/python3.10/site-packages/pandas/tests/groupby/__init__.py +25 -0
  5. venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__init__.py +0 -0
  6. venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py +1672 -0
  7. venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_cython.py +435 -0
  8. venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_numba.py +392 -0
  9. venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_other.py +675 -0
  10. venv/lib/python3.10/site-packages/pandas/tests/groupby/conftest.py +208 -0
  11. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_all_methods.py +83 -0
  12. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_api.py +265 -0
  13. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_apply.py +1606 -0
  14. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_apply_mutate.py +163 -0
  15. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_bin_groupby.py +65 -0
  16. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_categorical.py +2169 -0
  17. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_counting.py +394 -0
  18. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_cumulative.py +319 -0
  19. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_filters.py +636 -0
  20. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby.py +0 -0
  21. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_dropna.py +696 -0
  22. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_subclass.py +135 -0
  23. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_grouping.py +1236 -0
  24. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_index_as_string.py +85 -0
  25. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_indexing.py +333 -0
  26. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_libgroupby.py +331 -0
  27. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_missing.py +163 -0
  28. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_numba.py +80 -0
  29. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_numeric_only.py +521 -0
  30. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_pipe.py +80 -0
  31. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_raises.py +716 -0
  32. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_reductions.py +1176 -0
  33. venv/lib/python3.10/site-packages/pandas/tests/groupby/test_timegrouper.py +963 -0
  34. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/conftest.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_c_parser_only.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_comment.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_compression.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_concatenate_chunks.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_converters.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_encoding.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_header.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_mangle_dupes.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_multi_thread.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_na_values.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_parse_dates.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_python_parser_only.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_quoting.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_read_fwf.cpython-310.pyc +0 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_180_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8473e8705345f07b09663c4bde90d4cb68b759804dd8fc1d59efa6833debf41
3
+ size 41830148
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_46_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c60044f4ceb01efeb14c6d5362fb835c27a4a366de279a19d862a37e50a8b2d8
3
+ size 41830138
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_90_mp_rank_03_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0734336ccb5cc29b5d47ee8b114c84cea1a7803948296b50b0411052ef6abf74
3
+ size 41830330
venv/lib/python3.10/site-packages/pandas/tests/groupby/__init__.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def get_groupby_method_args(name, obj):
2
+ """
3
+ Get required arguments for a groupby method.
4
+
5
+ When parametrizing a test over groupby methods (e.g. "sum", "mean", "fillna"),
6
+ it is often the case that arguments are required for certain methods.
7
+
8
+ Parameters
9
+ ----------
10
+ name: str
11
+ Name of the method.
12
+ obj: Series or DataFrame
13
+ pandas object that is being grouped.
14
+
15
+ Returns
16
+ -------
17
+ A tuple of required arguments for the method.
18
+ """
19
+ if name in ("nth", "fillna", "take"):
20
+ return (0,)
21
+ if name == "quantile":
22
+ return (0.5,)
23
+ if name == "corrwith":
24
+ return (obj,)
25
+ return ()
venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py ADDED
@@ -0,0 +1,1672 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ test .agg behavior / note that .apply is tested generally in test_groupby.py
3
+ """
4
+ import datetime
5
+ import functools
6
+ from functools import partial
7
+ import re
8
+
9
+ import numpy as np
10
+ import pytest
11
+
12
+ from pandas.errors import SpecificationError
13
+
14
+ from pandas.core.dtypes.common import is_integer_dtype
15
+
16
+ import pandas as pd
17
+ from pandas import (
18
+ DataFrame,
19
+ Index,
20
+ MultiIndex,
21
+ Series,
22
+ concat,
23
+ to_datetime,
24
+ )
25
+ import pandas._testing as tm
26
+ from pandas.core.groupby.grouper import Grouping
27
+
28
+
29
+ def test_groupby_agg_no_extra_calls():
30
+ # GH#31760
31
+ df = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]})
32
+ gb = df.groupby("key")["value"]
33
+
34
+ def dummy_func(x):
35
+ assert len(x) != 0
36
+ return x.sum()
37
+
38
+ gb.agg(dummy_func)
39
+
40
+
41
+ def test_agg_regression1(tsframe):
42
+ grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
43
+ result = grouped.agg("mean")
44
+ expected = grouped.mean()
45
+ tm.assert_frame_equal(result, expected)
46
+
47
+
48
+ def test_agg_must_agg(df):
49
+ grouped = df.groupby("A")["C"]
50
+
51
+ msg = "Must produce aggregated value"
52
+ with pytest.raises(Exception, match=msg):
53
+ grouped.agg(lambda x: x.describe())
54
+ with pytest.raises(Exception, match=msg):
55
+ grouped.agg(lambda x: x.index[:2])
56
+
57
+
58
+ def test_agg_ser_multi_key(df):
59
+ f = lambda x: x.sum()
60
+ results = df.C.groupby([df.A, df.B]).aggregate(f)
61
+ expected = df.groupby(["A", "B"]).sum()["C"]
62
+ tm.assert_series_equal(results, expected)
63
+
64
+
65
+ def test_groupby_aggregation_mixed_dtype():
66
+ # GH 6212
67
+ expected = DataFrame(
68
+ {
69
+ "v1": [5, 5, 7, np.nan, 3, 3, 4, 1],
70
+ "v2": [55, 55, 77, np.nan, 33, 33, 44, 11],
71
+ },
72
+ index=MultiIndex.from_tuples(
73
+ [
74
+ (1, 95),
75
+ (1, 99),
76
+ (2, 95),
77
+ (2, 99),
78
+ ("big", "damp"),
79
+ ("blue", "dry"),
80
+ ("red", "red"),
81
+ ("red", "wet"),
82
+ ],
83
+ names=["by1", "by2"],
84
+ ),
85
+ )
86
+
87
+ df = DataFrame(
88
+ {
89
+ "v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
90
+ "v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
91
+ "by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
92
+ "by2": [
93
+ "wet",
94
+ "dry",
95
+ 99,
96
+ 95,
97
+ np.nan,
98
+ "damp",
99
+ 95,
100
+ 99,
101
+ "red",
102
+ 99,
103
+ np.nan,
104
+ np.nan,
105
+ ],
106
+ }
107
+ )
108
+
109
+ g = df.groupby(["by1", "by2"])
110
+ result = g[["v1", "v2"]].mean()
111
+ tm.assert_frame_equal(result, expected)
112
+
113
+
114
+ def test_groupby_aggregation_multi_level_column():
115
+ # GH 29772
116
+ lst = [
117
+ [True, True, True, False],
118
+ [True, False, np.nan, False],
119
+ [True, True, np.nan, False],
120
+ [True, True, np.nan, False],
121
+ ]
122
+ df = DataFrame(
123
+ data=lst,
124
+ columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),
125
+ )
126
+
127
+ msg = "DataFrame.groupby with axis=1 is deprecated"
128
+ with tm.assert_produces_warning(FutureWarning, match=msg):
129
+ gb = df.groupby(level=1, axis=1)
130
+ result = gb.sum(numeric_only=False)
131
+ expected = DataFrame({0: [2.0, True, True, True], 1: [1, 0, 1, 1]})
132
+
133
+ tm.assert_frame_equal(result, expected)
134
+
135
+
136
+ def test_agg_apply_corner(ts, tsframe):
137
+ # nothing to group, all NA
138
+ grouped = ts.groupby(ts * np.nan, group_keys=False)
139
+ assert ts.dtype == np.float64
140
+
141
+ # groupby float64 values results in a float64 Index
142
+ exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))
143
+ tm.assert_series_equal(grouped.sum(), exp)
144
+ tm.assert_series_equal(grouped.agg("sum"), exp)
145
+ tm.assert_series_equal(grouped.apply("sum"), exp, check_index_type=False)
146
+
147
+ # DataFrame
148
+ grouped = tsframe.groupby(tsframe["A"] * np.nan, group_keys=False)
149
+ exp_df = DataFrame(
150
+ columns=tsframe.columns,
151
+ dtype=float,
152
+ index=Index([], name="A", dtype=np.float64),
153
+ )
154
+ tm.assert_frame_equal(grouped.sum(), exp_df)
155
+ tm.assert_frame_equal(grouped.agg("sum"), exp_df)
156
+
157
+ msg = "The behavior of DataFrame.sum with axis=None is deprecated"
158
+ with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
159
+ res = grouped.apply(np.sum)
160
+ tm.assert_frame_equal(res, exp_df)
161
+
162
+
163
+ def test_agg_grouping_is_list_tuple(ts):
164
+ df = DataFrame(
165
+ np.random.default_rng(2).standard_normal((30, 4)),
166
+ columns=Index(list("ABCD"), dtype=object),
167
+ index=pd.date_range("2000-01-01", periods=30, freq="B"),
168
+ )
169
+
170
+ grouped = df.groupby(lambda x: x.year)
171
+ grouper = grouped._grouper.groupings[0].grouping_vector
172
+ grouped._grouper.groupings[0] = Grouping(ts.index, list(grouper))
173
+
174
+ result = grouped.agg("mean")
175
+ expected = grouped.mean()
176
+ tm.assert_frame_equal(result, expected)
177
+
178
+ grouped._grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
179
+
180
+ result = grouped.agg("mean")
181
+ expected = grouped.mean()
182
+ tm.assert_frame_equal(result, expected)
183
+
184
+
185
+ def test_agg_python_multiindex(multiindex_dataframe_random_data):
186
+ grouped = multiindex_dataframe_random_data.groupby(["A", "B"])
187
+
188
+ result = grouped.agg("mean")
189
+ expected = grouped.mean()
190
+ tm.assert_frame_equal(result, expected)
191
+
192
+
193
+ @pytest.mark.parametrize(
194
+ "groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]
195
+ )
196
+ def test_aggregate_str_func(tsframe, groupbyfunc):
197
+ grouped = tsframe.groupby(groupbyfunc)
198
+
199
+ # single series
200
+ result = grouped["A"].agg("std")
201
+ expected = grouped["A"].std()
202
+ tm.assert_series_equal(result, expected)
203
+
204
+ # group frame by function name
205
+ result = grouped.aggregate("var")
206
+ expected = grouped.var()
207
+ tm.assert_frame_equal(result, expected)
208
+
209
+ # group frame by function dict
210
+ result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})
211
+ expected = DataFrame(
212
+ {
213
+ "A": grouped["A"].var(),
214
+ "B": grouped["B"].std(),
215
+ "C": grouped["C"].mean(),
216
+ "D": grouped["D"].sem(),
217
+ }
218
+ )
219
+ tm.assert_frame_equal(result, expected)
220
+
221
+
222
+ def test_std_masked_dtype(any_numeric_ea_dtype):
223
+ # GH#35516
224
+ df = DataFrame(
225
+ {
226
+ "a": [2, 1, 1, 1, 2, 2, 1],
227
+ "b": Series([pd.NA, 1, 2, 1, 1, 1, 2], dtype="Float64"),
228
+ }
229
+ )
230
+ result = df.groupby("a").std()
231
+ expected = DataFrame(
232
+ {"b": [0.57735, 0]}, index=Index([1, 2], name="a"), dtype="Float64"
233
+ )
234
+ tm.assert_frame_equal(result, expected)
235
+
236
+
237
+ def test_agg_str_with_kwarg_axis_1_raises(df, reduction_func):
238
+ gb = df.groupby(level=0)
239
+ warn_msg = f"DataFrameGroupBy.{reduction_func} with axis=1 is deprecated"
240
+ if reduction_func in ("idxmax", "idxmin"):
241
+ error = TypeError
242
+ msg = "'[<>]' not supported between instances of 'float' and 'str'"
243
+ warn = FutureWarning
244
+ else:
245
+ error = ValueError
246
+ msg = f"Operation {reduction_func} does not support axis=1"
247
+ warn = None
248
+ with pytest.raises(error, match=msg):
249
+ with tm.assert_produces_warning(warn, match=warn_msg):
250
+ gb.agg(reduction_func, axis=1)
251
+
252
+
253
+ @pytest.mark.parametrize(
254
+ "func, expected, dtype, result_dtype_dict",
255
+ [
256
+ ("sum", [5, 7, 9], "int64", {}),
257
+ ("std", [4.5**0.5] * 3, int, {"i": float, "j": float, "k": float}),
258
+ ("var", [4.5] * 3, int, {"i": float, "j": float, "k": float}),
259
+ ("sum", [5, 7, 9], "Int64", {"j": "int64"}),
260
+ ("std", [4.5**0.5] * 3, "Int64", {"i": float, "j": float, "k": float}),
261
+ ("var", [4.5] * 3, "Int64", {"i": "float64", "j": "float64", "k": "float64"}),
262
+ ],
263
+ )
264
+ def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype_dict):
265
+ # GH#43209
266
+ df = DataFrame(
267
+ [[1, 2, 3, 4, 5, 6]] * 3,
268
+ columns=MultiIndex.from_product([["a", "b"], ["i", "j", "k"]]),
269
+ ).astype({("a", "j"): dtype, ("b", "j"): dtype})
270
+
271
+ msg = "DataFrame.groupby with axis=1 is deprecated"
272
+ with tm.assert_produces_warning(FutureWarning, match=msg):
273
+ gb = df.groupby(level=1, axis=1)
274
+ result = gb.agg(func)
275
+ expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype(
276
+ result_dtype_dict
277
+ )
278
+
279
+ tm.assert_frame_equal(result, expected)
280
+
281
+
282
+ @pytest.mark.parametrize(
283
+ "func, expected_data, result_dtype_dict",
284
+ [
285
+ ("sum", [[2, 4], [10, 12], [18, 20]], {10: "int64", 20: "int64"}),
286
+ # std should ideally return Int64 / Float64 #43330
287
+ ("std", [[2**0.5] * 2] * 3, "float64"),
288
+ ("var", [[2] * 2] * 3, {10: "float64", 20: "float64"}),
289
+ ],
290
+ )
291
+ def test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict):
292
+ # GH#43209
293
+ df = DataFrame(
294
+ np.arange(12).reshape(3, 4),
295
+ index=Index([0, 1, 0], name="y"),
296
+ columns=Index([10, 20, 10, 20], name="x"),
297
+ dtype="int64",
298
+ ).astype({10: "Int64"})
299
+
300
+ msg = "DataFrame.groupby with axis=1 is deprecated"
301
+ with tm.assert_produces_warning(FutureWarning, match=msg):
302
+ gb = df.groupby("x", axis=1)
303
+ result = gb.agg(func)
304
+ expected = DataFrame(
305
+ data=expected_data,
306
+ index=Index([0, 1, 0], name="y"),
307
+ columns=Index([10, 20], name="x"),
308
+ ).astype(result_dtype_dict)
309
+ tm.assert_frame_equal(result, expected)
310
+
311
+
312
+ def test_aggregate_item_by_item(df):
313
+ grouped = df.groupby("A")
314
+
315
+ aggfun_0 = lambda ser: ser.size
316
+ result = grouped.agg(aggfun_0)
317
+ foosum = (df.A == "foo").sum()
318
+ barsum = (df.A == "bar").sum()
319
+ K = len(result.columns)
320
+
321
+ # GH5782
322
+ exp = Series(np.array([foosum] * K), index=list("BCD"), name="foo")
323
+ tm.assert_series_equal(result.xs("foo"), exp)
324
+
325
+ exp = Series(np.array([barsum] * K), index=list("BCD"), name="bar")
326
+ tm.assert_almost_equal(result.xs("bar"), exp)
327
+
328
+ def aggfun_1(ser):
329
+ return ser.size
330
+
331
+ result = DataFrame().groupby(df.A).agg(aggfun_1)
332
+ assert isinstance(result, DataFrame)
333
+ assert len(result) == 0
334
+
335
+
336
+ def test_wrap_agg_out(three_group):
337
+ grouped = three_group.groupby(["A", "B"])
338
+
339
+ def func(ser):
340
+ if ser.dtype == object:
341
+ raise TypeError("Test error message")
342
+ return ser.sum()
343
+
344
+ with pytest.raises(TypeError, match="Test error message"):
345
+ grouped.aggregate(func)
346
+ result = grouped[["D", "E", "F"]].aggregate(func)
347
+ exp_grouped = three_group.loc[:, ["A", "B", "D", "E", "F"]]
348
+ expected = exp_grouped.groupby(["A", "B"]).aggregate(func)
349
+ tm.assert_frame_equal(result, expected)
350
+
351
+
352
+ def test_agg_multiple_functions_maintain_order(df):
353
+ # GH #610
354
+ funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]
355
+ msg = "is currently using SeriesGroupBy.mean"
356
+ with tm.assert_produces_warning(FutureWarning, match=msg):
357
+ result = df.groupby("A")["C"].agg(funcs)
358
+ exp_cols = Index(["mean", "max", "min"])
359
+
360
+ tm.assert_index_equal(result.columns, exp_cols)
361
+
362
+
363
+ def test_series_index_name(df):
364
+ grouped = df.loc[:, ["C"]].groupby(df["A"])
365
+ result = grouped.agg(lambda x: x.mean())
366
+ assert result.index.name == "A"
367
+
368
+
369
+ def test_agg_multiple_functions_same_name():
370
+ # GH 30880
371
+ df = DataFrame(
372
+ np.random.default_rng(2).standard_normal((1000, 3)),
373
+ index=pd.date_range("1/1/2012", freq="s", periods=1000),
374
+ columns=["A", "B", "C"],
375
+ )
376
+ result = df.resample("3min").agg(
377
+ {"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
378
+ )
379
+ expected_index = pd.date_range("1/1/2012", freq="3min", periods=6)
380
+ expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")])
381
+ expected_values = np.array(
382
+ [df.resample("3min").A.quantile(q=q).values for q in [0.9999, 0.1111]]
383
+ ).T
384
+ expected = DataFrame(
385
+ expected_values, columns=expected_columns, index=expected_index
386
+ )
387
+ tm.assert_frame_equal(result, expected)
388
+
389
+
390
+ def test_agg_multiple_functions_same_name_with_ohlc_present():
391
+ # GH 30880
392
+ # ohlc expands dimensions, so different test to the above is required.
393
+ df = DataFrame(
394
+ np.random.default_rng(2).standard_normal((1000, 3)),
395
+ index=pd.date_range("1/1/2012", freq="s", periods=1000, name="dti"),
396
+ columns=Index(["A", "B", "C"], name="alpha"),
397
+ )
398
+ result = df.resample("3min").agg(
399
+ {"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
400
+ )
401
+ expected_index = pd.date_range("1/1/2012", freq="3min", periods=6, name="dti")
402
+ expected_columns = MultiIndex.from_tuples(
403
+ [
404
+ ("A", "ohlc", "open"),
405
+ ("A", "ohlc", "high"),
406
+ ("A", "ohlc", "low"),
407
+ ("A", "ohlc", "close"),
408
+ ("A", "quantile", "A"),
409
+ ("A", "quantile", "A"),
410
+ ],
411
+ names=["alpha", None, None],
412
+ )
413
+ non_ohlc_expected_values = np.array(
414
+ [df.resample("3min").A.quantile(q=q).values for q in [0.9999, 0.1111]]
415
+ ).T
416
+ expected_values = np.hstack(
417
+ [df.resample("3min").A.ohlc(), non_ohlc_expected_values]
418
+ )
419
+ expected = DataFrame(
420
+ expected_values, columns=expected_columns, index=expected_index
421
+ )
422
+ tm.assert_frame_equal(result, expected)
423
+
424
+
425
+ def test_multiple_functions_tuples_and_non_tuples(df):
426
+ # #1359
427
+ # Columns B and C would cause partial failure
428
+ df = df.drop(columns=["B", "C"])
429
+
430
+ funcs = [("foo", "mean"), "std"]
431
+ ex_funcs = [("foo", "mean"), ("std", "std")]
432
+
433
+ result = df.groupby("A")["D"].agg(funcs)
434
+ expected = df.groupby("A")["D"].agg(ex_funcs)
435
+ tm.assert_frame_equal(result, expected)
436
+
437
+ result = df.groupby("A").agg(funcs)
438
+ expected = df.groupby("A").agg(ex_funcs)
439
+ tm.assert_frame_equal(result, expected)
440
+
441
+
442
+ def test_more_flexible_frame_multi_function(df):
443
+ grouped = df.groupby("A")
444
+
445
+ exmean = grouped.agg({"C": "mean", "D": "mean"})
446
+ exstd = grouped.agg({"C": "std", "D": "std"})
447
+
448
+ expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
449
+ expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
450
+
451
+ d = {"C": ["mean", "std"], "D": ["mean", "std"]}
452
+ result = grouped.aggregate(d)
453
+
454
+ tm.assert_frame_equal(result, expected)
455
+
456
+ # be careful
457
+ result = grouped.aggregate({"C": "mean", "D": ["mean", "std"]})
458
+ expected = grouped.aggregate({"C": "mean", "D": ["mean", "std"]})
459
+ tm.assert_frame_equal(result, expected)
460
+
461
+ def numpymean(x):
462
+ return np.mean(x)
463
+
464
+ def numpystd(x):
465
+ return np.std(x, ddof=1)
466
+
467
+ # this uses column selection & renaming
468
+ msg = r"nested renamer is not supported"
469
+ with pytest.raises(SpecificationError, match=msg):
470
+ d = {"C": "mean", "D": {"foo": "mean", "bar": "std"}}
471
+ grouped.aggregate(d)
472
+
473
+ # But without renaming, these functions are OK
474
+ d = {"C": ["mean"], "D": [numpymean, numpystd]}
475
+ grouped.aggregate(d)
476
+
477
+
478
+ def test_multi_function_flexible_mix(df):
479
+ # GH #1268
480
+ grouped = df.groupby("A")
481
+
482
+ # Expected
483
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}
484
+ # this uses column selection & renaming
485
+ msg = r"nested renamer is not supported"
486
+ with pytest.raises(SpecificationError, match=msg):
487
+ grouped.aggregate(d)
488
+
489
+ # Test 1
490
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
491
+ # this uses column selection & renaming
492
+ with pytest.raises(SpecificationError, match=msg):
493
+ grouped.aggregate(d)
494
+
495
+ # Test 2
496
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
497
+ # this uses column selection & renaming
498
+ with pytest.raises(SpecificationError, match=msg):
499
+ grouped.aggregate(d)
500
+
501
+
502
+ def test_groupby_agg_coercing_bools():
503
+ # issue 14873
504
+ dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})
505
+ gp = dat.groupby("a")
506
+
507
+ index = Index([1, 2], name="a")
508
+
509
+ result = gp["b"].aggregate(lambda x: (x != 0).all())
510
+ expected = Series([False, True], index=index, name="b")
511
+ tm.assert_series_equal(result, expected)
512
+
513
+ result = gp["c"].aggregate(lambda x: x.isnull().all())
514
+ expected = Series([True, False], index=index, name="c")
515
+ tm.assert_series_equal(result, expected)
516
+
517
+
518
+ def test_groupby_agg_dict_with_getitem():
519
+ # issue 25471
520
+ dat = DataFrame({"A": ["A", "A", "B", "B", "B"], "B": [1, 2, 1, 1, 2]})
521
+ result = dat.groupby("A")[["B"]].agg({"B": "sum"})
522
+
523
+ expected = DataFrame({"B": [3, 4]}, index=["A", "B"]).rename_axis("A", axis=0)
524
+
525
+ tm.assert_frame_equal(result, expected)
526
+
527
+
528
+ def test_groupby_agg_dict_dup_columns():
529
+ # GH#55006
530
+ df = DataFrame(
531
+ [[1, 2, 3, 4], [1, 3, 4, 5], [2, 4, 5, 6]],
532
+ columns=["a", "b", "c", "c"],
533
+ )
534
+ gb = df.groupby("a")
535
+ result = gb.agg({"b": "sum"})
536
+ expected = DataFrame({"b": [5, 4]}, index=Index([1, 2], name="a"))
537
+ tm.assert_frame_equal(result, expected)
538
+
539
+
540
+ @pytest.mark.parametrize(
541
+ "op",
542
+ [
543
+ lambda x: x.sum(),
544
+ lambda x: x.cumsum(),
545
+ lambda x: x.transform("sum"),
546
+ lambda x: x.transform("cumsum"),
547
+ lambda x: x.agg("sum"),
548
+ lambda x: x.agg("cumsum"),
549
+ ],
550
+ )
551
+ def test_bool_agg_dtype(op):
552
+ # GH 7001
553
+ # Bool sum aggregations result in int
554
+ df = DataFrame({"a": [1, 1], "b": [False, True]})
555
+ s = df.set_index("a")["b"]
556
+
557
+ result = op(df.groupby("a"))["b"].dtype
558
+ assert is_integer_dtype(result)
559
+
560
+ result = op(s.groupby("a")).dtype
561
+ assert is_integer_dtype(result)
562
+
563
+
564
+ @pytest.mark.parametrize(
565
+ "keys, agg_index",
566
+ [
567
+ (["a"], Index([1], name="a")),
568
+ (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
569
+ ],
570
+ )
571
+ @pytest.mark.parametrize(
572
+ "input_dtype", ["bool", "int32", "int64", "float32", "float64"]
573
+ )
574
+ @pytest.mark.parametrize(
575
+ "result_dtype", ["bool", "int32", "int64", "float32", "float64"]
576
+ )
577
+ @pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
578
+ def test_callable_result_dtype_frame(
579
+ keys, agg_index, input_dtype, result_dtype, method
580
+ ):
581
+ # GH 21240
582
+ df = DataFrame({"a": [1], "b": [2], "c": [True]})
583
+ df["c"] = df["c"].astype(input_dtype)
584
+ op = getattr(df.groupby(keys)[["c"]], method)
585
+ result = op(lambda x: x.astype(result_dtype).iloc[0])
586
+ expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
587
+ expected = DataFrame({"c": [df["c"].iloc[0]]}, index=expected_index).astype(
588
+ result_dtype
589
+ )
590
+ if method == "apply":
591
+ expected.columns.names = [0]
592
+ tm.assert_frame_equal(result, expected)
593
+
594
+
595
+ @pytest.mark.parametrize(
596
+ "keys, agg_index",
597
+ [
598
+ (["a"], Index([1], name="a")),
599
+ (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])),
600
+ ],
601
+ )
602
+ @pytest.mark.parametrize("input", [True, 1, 1.0])
603
+ @pytest.mark.parametrize("dtype", [bool, int, float])
604
+ @pytest.mark.parametrize("method", ["apply", "aggregate", "transform"])
605
+ def test_callable_result_dtype_series(keys, agg_index, input, dtype, method):
606
+ # GH 21240
607
+ df = DataFrame({"a": [1], "b": [2], "c": [input]})
608
+ op = getattr(df.groupby(keys)["c"], method)
609
+ result = op(lambda x: x.astype(dtype).iloc[0])
610
+ expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index
611
+ expected = Series([df["c"].iloc[0]], index=expected_index, name="c").astype(dtype)
612
+ tm.assert_series_equal(result, expected)
613
+
614
+
615
+ def test_order_aggregate_multiple_funcs():
616
+ # GH 25692
617
+ df = DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})
618
+
619
+ res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"])
620
+ result = res.columns.levels[1]
621
+
622
+ expected = Index(["sum", "max", "mean", "ohlc", "min"])
623
+
624
+ tm.assert_index_equal(result, expected)
625
+
626
+
627
+ def test_ohlc_ea_dtypes(any_numeric_ea_dtype):
628
+ # GH#37493
629
+ df = DataFrame(
630
+ {"a": [1, 1, 2, 3, 4, 4], "b": [22, 11, pd.NA, 10, 20, pd.NA]},
631
+ dtype=any_numeric_ea_dtype,
632
+ )
633
+ gb = df.groupby("a")
634
+ result = gb.ohlc()
635
+ expected = DataFrame(
636
+ [[22, 22, 11, 11], [pd.NA] * 4, [10] * 4, [20] * 4],
637
+ columns=MultiIndex.from_product([["b"], ["open", "high", "low", "close"]]),
638
+ index=Index([1, 2, 3, 4], dtype=any_numeric_ea_dtype, name="a"),
639
+ dtype=any_numeric_ea_dtype,
640
+ )
641
+ tm.assert_frame_equal(result, expected)
642
+
643
+ gb2 = df.groupby("a", as_index=False)
644
+ result2 = gb2.ohlc()
645
+ expected2 = expected.reset_index()
646
+ tm.assert_frame_equal(result2, expected2)
647
+
648
+
649
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64])
650
+ @pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])
651
+ def test_uint64_type_handling(dtype, how):
652
+ # GH 26310
653
+ df = DataFrame({"x": 6903052872240755750, "y": [1, 2]})
654
+ expected = df.groupby("y").agg({"x": how})
655
+ df.x = df.x.astype(dtype)
656
+ result = df.groupby("y").agg({"x": how})
657
+ if how not in ("mean", "median"):
658
+ # mean and median always result in floats
659
+ result.x = result.x.astype(np.int64)
660
+ tm.assert_frame_equal(result, expected, check_exact=True)
661
+
662
+
663
+ def test_func_duplicates_raises():
664
+ # GH28426
665
+ msg = "Function names"
666
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
667
+ with pytest.raises(SpecificationError, match=msg):
668
+ df.groupby("A").agg(["min", "min"])
669
+
670
+
671
+ @pytest.mark.parametrize(
672
+ "index",
673
+ [
674
+ pd.CategoricalIndex(list("abc")),
675
+ pd.interval_range(0, 3),
676
+ pd.period_range("2020", periods=3, freq="D"),
677
+ MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
678
+ ],
679
+ )
680
+ def test_agg_index_has_complex_internals(index):
681
+ # GH 31223
682
+ df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
683
+ result = df.groupby("group").agg({"value": Series.nunique})
684
+ expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")
685
+ tm.assert_frame_equal(result, expected)
686
+
687
+
688
+ def test_agg_split_block():
689
+ # https://github.com/pandas-dev/pandas/issues/31522
690
+ df = DataFrame(
691
+ {
692
+ "key1": ["a", "a", "b", "b", "a"],
693
+ "key2": ["one", "two", "one", "two", "one"],
694
+ "key3": ["three", "three", "three", "six", "six"],
695
+ }
696
+ )
697
+ result = df.groupby("key1").min()
698
+ expected = DataFrame(
699
+ {"key2": ["one", "one"], "key3": ["six", "six"]},
700
+ index=Index(["a", "b"], name="key1"),
701
+ )
702
+ tm.assert_frame_equal(result, expected)
703
+
704
+
705
+ def test_agg_split_object_part_datetime():
706
+ # https://github.com/pandas-dev/pandas/pull/31616
707
+ df = DataFrame(
708
+ {
709
+ "A": pd.date_range("2000", periods=4),
710
+ "B": ["a", "b", "c", "d"],
711
+ "C": [1, 2, 3, 4],
712
+ "D": ["b", "c", "d", "e"],
713
+ "E": pd.date_range("2000", periods=4),
714
+ "F": [1, 2, 3, 4],
715
+ }
716
+ ).astype(object)
717
+ result = df.groupby([0, 0, 0, 0]).min()
718
+ expected = DataFrame(
719
+ {
720
+ "A": [pd.Timestamp("2000")],
721
+ "B": ["a"],
722
+ "C": [1],
723
+ "D": ["b"],
724
+ "E": [pd.Timestamp("2000")],
725
+ "F": [1],
726
+ },
727
+ index=np.array([0]),
728
+ dtype=object,
729
+ )
730
+ tm.assert_frame_equal(result, expected)
731
+
732
+
733
+ class TestNamedAggregationSeries:
734
+ def test_series_named_agg(self):
735
+ df = Series([1, 2, 3, 4])
736
+ gr = df.groupby([0, 0, 1, 1])
737
+ result = gr.agg(a="sum", b="min")
738
+ expected = DataFrame(
739
+ {"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=np.array([0, 1])
740
+ )
741
+ tm.assert_frame_equal(result, expected)
742
+
743
+ result = gr.agg(b="min", a="sum")
744
+ expected = expected[["b", "a"]]
745
+ tm.assert_frame_equal(result, expected)
746
+
747
+ def test_no_args_raises(self):
748
+ gr = Series([1, 2]).groupby([0, 1])
749
+ with pytest.raises(TypeError, match="Must provide"):
750
+ gr.agg()
751
+
752
+ # but we do allow this
753
+ result = gr.agg([])
754
+ expected = DataFrame(columns=[])
755
+ tm.assert_frame_equal(result, expected)
756
+
757
+ def test_series_named_agg_duplicates_no_raises(self):
758
+ # GH28426
759
+ gr = Series([1, 2, 3]).groupby([0, 0, 1])
760
+ grouped = gr.agg(a="sum", b="sum")
761
+ expected = DataFrame({"a": [3, 3], "b": [3, 3]}, index=np.array([0, 1]))
762
+ tm.assert_frame_equal(expected, grouped)
763
+
764
+ def test_mangled(self):
765
+ gr = Series([1, 2, 3]).groupby([0, 0, 1])
766
+ result = gr.agg(a=lambda x: 0, b=lambda x: 1)
767
+ expected = DataFrame({"a": [0, 0], "b": [1, 1]}, index=np.array([0, 1]))
768
+ tm.assert_frame_equal(result, expected)
769
+
770
+ @pytest.mark.parametrize(
771
+ "inp",
772
+ [
773
+ pd.NamedAgg(column="anything", aggfunc="min"),
774
+ ("anything", "min"),
775
+ ["anything", "min"],
776
+ ],
777
+ )
778
+ def test_named_agg_nametuple(self, inp):
779
+ # GH34422
780
+ s = Series([1, 1, 2, 2, 3, 3, 4, 5])
781
+ msg = f"func is expected but received {type(inp).__name__}"
782
+ with pytest.raises(TypeError, match=msg):
783
+ s.groupby(s.values).agg(a=inp)
784
+
785
+
786
+ class TestNamedAggregationDataFrame:
787
+ def test_agg_relabel(self):
788
+ df = DataFrame(
789
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
790
+ )
791
+ result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))
792
+ expected = DataFrame(
793
+ {"a_max": [1, 3], "b_max": [6, 8]},
794
+ index=Index(["a", "b"], name="group"),
795
+ columns=["a_max", "b_max"],
796
+ )
797
+ tm.assert_frame_equal(result, expected)
798
+
799
+ # order invariance
800
+ p98 = functools.partial(np.percentile, q=98)
801
+ result = df.groupby("group").agg(
802
+ b_min=("B", "min"),
803
+ a_min=("A", "min"),
804
+ a_mean=("A", "mean"),
805
+ a_max=("A", "max"),
806
+ b_max=("B", "max"),
807
+ a_98=("A", p98),
808
+ )
809
+ expected = DataFrame(
810
+ {
811
+ "b_min": [5, 7],
812
+ "a_min": [0, 2],
813
+ "a_mean": [0.5, 2.5],
814
+ "a_max": [1, 3],
815
+ "b_max": [6, 8],
816
+ "a_98": [0.98, 2.98],
817
+ },
818
+ index=Index(["a", "b"], name="group"),
819
+ columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],
820
+ )
821
+ tm.assert_frame_equal(result, expected)
822
+
823
+ def test_agg_relabel_non_identifier(self):
824
+ df = DataFrame(
825
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
826
+ )
827
+
828
+ result = df.groupby("group").agg(**{"my col": ("A", "max")})
829
+ expected = DataFrame({"my col": [1, 3]}, index=Index(["a", "b"], name="group"))
830
+ tm.assert_frame_equal(result, expected)
831
+
832
+ def test_duplicate_no_raises(self):
833
+ # GH 28426, if use same input function on same column,
834
+ # no error should raise
835
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
836
+
837
+ grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min"))
838
+ expected = DataFrame({"a": [1, 3], "b": [1, 3]}, index=Index([0, 1], name="A"))
839
+ tm.assert_frame_equal(grouped, expected)
840
+
841
+ quant50 = functools.partial(np.percentile, q=50)
842
+ quant70 = functools.partial(np.percentile, q=70)
843
+ quant50.__name__ = "quant50"
844
+ quant70.__name__ = "quant70"
845
+
846
+ test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]})
847
+
848
+ grouped = test.groupby("col1").agg(
849
+ quantile_50=("col2", quant50), quantile_70=("col2", quant70)
850
+ )
851
+ expected = DataFrame(
852
+ {"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]},
853
+ index=Index(["a", "b"], name="col1"),
854
+ )
855
+ tm.assert_frame_equal(grouped, expected)
856
+
857
+ def test_agg_relabel_with_level(self):
858
+ df = DataFrame(
859
+ {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},
860
+ index=MultiIndex.from_product([["A", "B"], ["a", "b"]]),
861
+ )
862
+ result = df.groupby(level=0).agg(
863
+ aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")
864
+ )
865
+ expected = DataFrame(
866
+ {"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]
867
+ )
868
+ tm.assert_frame_equal(result, expected)
869
+
870
+ def test_agg_relabel_other_raises(self):
871
+ df = DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
872
+ grouped = df.groupby("A")
873
+ match = "Must provide"
874
+ with pytest.raises(TypeError, match=match):
875
+ grouped.agg(foo=1)
876
+
877
+ with pytest.raises(TypeError, match=match):
878
+ grouped.agg()
879
+
880
+ with pytest.raises(TypeError, match=match):
881
+ grouped.agg(a=("B", "max"), b=(1, 2, 3))
882
+
883
+ def test_missing_raises(self):
884
+ df = DataFrame({"A": [0, 1], "B": [1, 2]})
885
+ match = re.escape("Column(s) ['C'] do not exist")
886
+ with pytest.raises(KeyError, match=match):
887
+ df.groupby("A").agg(c=("C", "sum"))
888
+
889
+ def test_agg_namedtuple(self):
890
+ df = DataFrame({"A": [0, 1], "B": [1, 2]})
891
+ result = df.groupby("A").agg(
892
+ b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")
893
+ )
894
+ expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))
895
+ tm.assert_frame_equal(result, expected)
896
+
897
+ def test_mangled(self):
898
+ df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})
899
+ result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))
900
+ expected = DataFrame({"b": [0, 0], "c": [1, 1]}, index=Index([0, 1], name="A"))
901
+ tm.assert_frame_equal(result, expected)
902
+
903
+
904
+ @pytest.mark.parametrize(
905
+ "agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",
906
+ [
907
+ (
908
+ (("y", "A"), "max"),
909
+ (("y", "A"), np.mean),
910
+ (("y", "B"), "mean"),
911
+ [1, 3],
912
+ [0.5, 2.5],
913
+ [5.5, 7.5],
914
+ ),
915
+ (
916
+ (("y", "A"), lambda x: max(x)),
917
+ (("y", "A"), lambda x: 1),
918
+ (("y", "B"), np.mean),
919
+ [1, 3],
920
+ [1, 1],
921
+ [5.5, 7.5],
922
+ ),
923
+ (
924
+ pd.NamedAgg(("y", "A"), "max"),
925
+ pd.NamedAgg(("y", "B"), np.mean),
926
+ pd.NamedAgg(("y", "A"), lambda x: 1),
927
+ [1, 3],
928
+ [5.5, 7.5],
929
+ [1, 1],
930
+ ),
931
+ ],
932
+ )
933
+ def test_agg_relabel_multiindex_column(
934
+ agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3
935
+ ):
936
+ # GH 29422, add tests for multiindex column cases
937
+ df = DataFrame(
938
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
939
+ )
940
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
941
+ idx = Index(["a", "b"], name=("x", "group"))
942
+
943
+ result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max"))
944
+ expected = DataFrame({"a_max": [1, 3]}, index=idx)
945
+ tm.assert_frame_equal(result, expected)
946
+
947
+ msg = "is currently using SeriesGroupBy.mean"
948
+ with tm.assert_produces_warning(FutureWarning, match=msg):
949
+ result = df.groupby(("x", "group")).agg(
950
+ col_1=agg_col1, col_2=agg_col2, col_3=agg_col3
951
+ )
952
+ expected = DataFrame(
953
+ {"col_1": agg_result1, "col_2": agg_result2, "col_3": agg_result3}, index=idx
954
+ )
955
+ tm.assert_frame_equal(result, expected)
956
+
957
+
958
+ def test_agg_relabel_multiindex_raises_not_exist():
959
+ # GH 29422, add test for raises scenario when aggregate column does not exist
960
+ df = DataFrame(
961
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
962
+ )
963
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
964
+
965
+ with pytest.raises(KeyError, match="do not exist"):
966
+ df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))
967
+
968
+
969
+ def test_agg_relabel_multiindex_duplicates():
970
+ # GH29422, add test for raises scenario when getting duplicates
971
+ # GH28426, after this change, duplicates should also work if the relabelling is
972
+ # different
973
+ df = DataFrame(
974
+ {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
975
+ )
976
+ df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
977
+
978
+ result = df.groupby(("x", "group")).agg(
979
+ a=(("y", "A"), "min"), b=(("y", "A"), "min")
980
+ )
981
+ idx = Index(["a", "b"], name=("x", "group"))
982
+ expected = DataFrame({"a": [0, 2], "b": [0, 2]}, index=idx)
983
+ tm.assert_frame_equal(result, expected)
984
+
985
+
986
+ @pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])
987
+ def test_groupby_aggregate_empty_key(kwargs):
988
+ # GH: 32580
989
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
990
+ result = df.groupby("a").agg(kwargs)
991
+ expected = DataFrame(
992
+ [1, 4],
993
+ index=Index([1, 2], dtype="int64", name="a"),
994
+ columns=MultiIndex.from_tuples([["c", "min"]]),
995
+ )
996
+ tm.assert_frame_equal(result, expected)
997
+
998
+
999
+ def test_groupby_aggregate_empty_key_empty_return():
1000
+ # GH: 32580 Check if everything works, when return is empty
1001
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
1002
+ result = df.groupby("a").agg({"b": []})
1003
+ expected = DataFrame(columns=MultiIndex(levels=[["b"], []], codes=[[], []]))
1004
+ tm.assert_frame_equal(result, expected)
1005
+
1006
+
1007
+ def test_groupby_aggregate_empty_with_multiindex_frame():
1008
+ # GH 39178
1009
+ df = DataFrame(columns=["a", "b", "c"])
1010
+ result = df.groupby(["a", "b"], group_keys=False).agg(d=("c", list))
1011
+ expected = DataFrame(
1012
+ columns=["d"], index=MultiIndex([[], []], [[], []], names=["a", "b"])
1013
+ )
1014
+ tm.assert_frame_equal(result, expected)
1015
+
1016
+
1017
+ def test_grouby_agg_loses_results_with_as_index_false_relabel():
1018
+ # GH 32240: When the aggregate function relabels column names and
1019
+ # as_index=False is specified, the results are dropped.
1020
+
1021
+ df = DataFrame(
1022
+ {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}
1023
+ )
1024
+
1025
+ grouped = df.groupby("key", as_index=False)
1026
+ result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
1027
+ expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})
1028
+ tm.assert_frame_equal(result, expected)
1029
+
1030
+
1031
+ def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex():
1032
+ # GH 32240: When the aggregate function relabels column names and
1033
+ # as_index=False is specified, the results are dropped. Check if
1034
+ # multiindex is returned in the right order
1035
+
1036
+ df = DataFrame(
1037
+ {
1038
+ "key": ["x", "y", "x", "y", "x", "x"],
1039
+ "key1": ["a", "b", "c", "b", "a", "c"],
1040
+ "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],
1041
+ }
1042
+ )
1043
+
1044
+ grouped = df.groupby(["key", "key1"], as_index=False)
1045
+ result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
1046
+ expected = DataFrame(
1047
+ {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}
1048
+ )
1049
+ tm.assert_frame_equal(result, expected)
1050
+
1051
+
1052
+ @pytest.mark.parametrize(
1053
+ "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
1054
+ )
1055
+ def test_multiindex_custom_func(func):
1056
+ # GH 31777
1057
+ data = [[1, 4, 2], [5, 7, 1]]
1058
+ df = DataFrame(
1059
+ data,
1060
+ columns=MultiIndex.from_arrays(
1061
+ [[1, 1, 2], [3, 4, 3]], names=["Sisko", "Janeway"]
1062
+ ),
1063
+ )
1064
+ result = df.groupby(np.array([0, 1])).agg(func)
1065
+ expected_dict = {
1066
+ (1, 3): {0: 1.0, 1: 5.0},
1067
+ (1, 4): {0: 4.0, 1: 7.0},
1068
+ (2, 3): {0: 2.0, 1: 1.0},
1069
+ }
1070
+ expected = DataFrame(expected_dict, index=np.array([0, 1]), columns=df.columns)
1071
+ tm.assert_frame_equal(result, expected)
1072
+
1073
+
1074
+ def myfunc(s):
1075
+ return np.percentile(s, q=0.90)
1076
+
1077
+
1078
+ @pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])
1079
+ def test_lambda_named_agg(func):
1080
+ # see gh-28467
1081
+ animals = DataFrame(
1082
+ {
1083
+ "kind": ["cat", "dog", "cat", "dog"],
1084
+ "height": [9.1, 6.0, 9.5, 34.0],
1085
+ "weight": [7.9, 7.5, 9.9, 198.0],
1086
+ }
1087
+ )
1088
+
1089
+ result = animals.groupby("kind").agg(
1090
+ mean_height=("height", "mean"), perc90=("height", func)
1091
+ )
1092
+ expected = DataFrame(
1093
+ [[9.3, 9.1036], [20.0, 6.252]],
1094
+ columns=["mean_height", "perc90"],
1095
+ index=Index(["cat", "dog"], name="kind"),
1096
+ )
1097
+
1098
+ tm.assert_frame_equal(result, expected)
1099
+
1100
+
1101
+ def test_aggregate_mixed_types():
1102
+ # GH 16916
1103
+ df = DataFrame(
1104
+ data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")
1105
+ )
1106
+ df["grouping"] = ["group 1", "group 1", 2]
1107
+ result = df.groupby("grouping").aggregate(lambda x: x.tolist())
1108
+ expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]
1109
+ expected = DataFrame(
1110
+ expected_data,
1111
+ index=Index([2, "group 1"], dtype="object", name="grouping"),
1112
+ columns=Index(["X", "Y", "Z"], dtype="object"),
1113
+ )
1114
+ tm.assert_frame_equal(result, expected)
1115
+
1116
+
1117
+ @pytest.mark.xfail(reason="Not implemented;see GH 31256")
1118
+ def test_aggregate_udf_na_extension_type():
1119
+ # https://github.com/pandas-dev/pandas/pull/31359
1120
+ # This is currently failing to cast back to Int64Dtype.
1121
+ # The presence of the NA causes two problems
1122
+ # 1. NA is not an instance of Int64Dtype.type (numpy.int64)
1123
+ # 2. The presence of an NA forces object type, so the non-NA values is
1124
+ # a Python int rather than a NumPy int64. Python ints aren't
1125
+ # instances of numpy.int64.
1126
+ def aggfunc(x):
1127
+ if all(x > 2):
1128
+ return 1
1129
+ else:
1130
+ return pd.NA
1131
+
1132
+ df = DataFrame({"A": pd.array([1, 2, 3])})
1133
+ result = df.groupby([1, 1, 2]).agg(aggfunc)
1134
+ expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])
1135
+ tm.assert_frame_equal(result, expected)
1136
+
1137
+
1138
+ class TestLambdaMangling:
1139
+ def test_basic(self):
1140
+ df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
1141
+ result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})
1142
+
1143
+ expected = DataFrame(
1144
+ {("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},
1145
+ index=Index([0, 1], name="A"),
1146
+ )
1147
+ tm.assert_frame_equal(result, expected)
1148
+
1149
+ def test_mangle_series_groupby(self):
1150
+ gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
1151
+ result = gr.agg([lambda x: 0, lambda x: 1])
1152
+ exp_data = {"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]}
1153
+ expected = DataFrame(exp_data, index=np.array([0, 1]))
1154
+ tm.assert_frame_equal(result, expected)
1155
+
1156
+ @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.")
1157
+ def test_with_kwargs(self):
1158
+ f1 = lambda x, y, b=1: x.sum() + y + b
1159
+ f2 = lambda x, y, b=2: x.sum() + y * b
1160
+ result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
1161
+ expected = DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
1162
+ tm.assert_frame_equal(result, expected)
1163
+
1164
+ result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
1165
+ expected = DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
1166
+ tm.assert_frame_equal(result, expected)
1167
+
1168
+ def test_agg_with_one_lambda(self):
1169
+ # GH 25719, write tests for DataFrameGroupby.agg with only one lambda
1170
+ df = DataFrame(
1171
+ {
1172
+ "kind": ["cat", "dog", "cat", "dog"],
1173
+ "height": [9.1, 6.0, 9.5, 34.0],
1174
+ "weight": [7.9, 7.5, 9.9, 198.0],
1175
+ }
1176
+ )
1177
+
1178
+ columns = ["height_sqr_min", "height_max", "weight_max"]
1179
+ expected = DataFrame(
1180
+ {
1181
+ "height_sqr_min": [82.81, 36.00],
1182
+ "height_max": [9.5, 34.0],
1183
+ "weight_max": [9.9, 198.0],
1184
+ },
1185
+ index=Index(["cat", "dog"], name="kind"),
1186
+ columns=columns,
1187
+ )
1188
+
1189
+ # check pd.NameAgg case
1190
+ result1 = df.groupby(by="kind").agg(
1191
+ height_sqr_min=pd.NamedAgg(
1192
+ column="height", aggfunc=lambda x: np.min(x**2)
1193
+ ),
1194
+ height_max=pd.NamedAgg(column="height", aggfunc="max"),
1195
+ weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
1196
+ )
1197
+ tm.assert_frame_equal(result1, expected)
1198
+
1199
+ # check agg(key=(col, aggfunc)) case
1200
+ result2 = df.groupby(by="kind").agg(
1201
+ height_sqr_min=("height", lambda x: np.min(x**2)),
1202
+ height_max=("height", "max"),
1203
+ weight_max=("weight", "max"),
1204
+ )
1205
+ tm.assert_frame_equal(result2, expected)
1206
+
1207
+ def test_agg_multiple_lambda(self):
1208
+ # GH25719, test for DataFrameGroupby.agg with multiple lambdas
1209
+ # with mixed aggfunc
1210
+ df = DataFrame(
1211
+ {
1212
+ "kind": ["cat", "dog", "cat", "dog"],
1213
+ "height": [9.1, 6.0, 9.5, 34.0],
1214
+ "weight": [7.9, 7.5, 9.9, 198.0],
1215
+ }
1216
+ )
1217
+ columns = [
1218
+ "height_sqr_min",
1219
+ "height_max",
1220
+ "weight_max",
1221
+ "height_max_2",
1222
+ "weight_min",
1223
+ ]
1224
+ expected = DataFrame(
1225
+ {
1226
+ "height_sqr_min": [82.81, 36.00],
1227
+ "height_max": [9.5, 34.0],
1228
+ "weight_max": [9.9, 198.0],
1229
+ "height_max_2": [9.5, 34.0],
1230
+ "weight_min": [7.9, 7.5],
1231
+ },
1232
+ index=Index(["cat", "dog"], name="kind"),
1233
+ columns=columns,
1234
+ )
1235
+
1236
+ # check agg(key=(col, aggfunc)) case
1237
+ result1 = df.groupby(by="kind").agg(
1238
+ height_sqr_min=("height", lambda x: np.min(x**2)),
1239
+ height_max=("height", "max"),
1240
+ weight_max=("weight", "max"),
1241
+ height_max_2=("height", lambda x: np.max(x)),
1242
+ weight_min=("weight", lambda x: np.min(x)),
1243
+ )
1244
+ tm.assert_frame_equal(result1, expected)
1245
+
1246
+ # check pd.NamedAgg case
1247
+ result2 = df.groupby(by="kind").agg(
1248
+ height_sqr_min=pd.NamedAgg(
1249
+ column="height", aggfunc=lambda x: np.min(x**2)
1250
+ ),
1251
+ height_max=pd.NamedAgg(column="height", aggfunc="max"),
1252
+ weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
1253
+ height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
1254
+ weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
1255
+ )
1256
+ tm.assert_frame_equal(result2, expected)
1257
+
1258
+
1259
+ def test_groupby_get_by_index():
1260
+ # GH 33439
1261
+ df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
1262
+ res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})
1263
+ expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")
1264
+ tm.assert_frame_equal(res, expected)
1265
+
1266
+
1267
+ @pytest.mark.parametrize(
1268
+ "grp_col_dict, exp_data",
1269
+ [
1270
+ ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
1271
+ ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
1272
+ ({"nr": "min"}, {"nr": [1, 5]}),
1273
+ ],
1274
+ )
1275
+ def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
1276
+ # test single aggregations on ordered categorical cols GHGH27800
1277
+
1278
+ # create the result dataframe
1279
+ input_df = DataFrame(
1280
+ {
1281
+ "nr": [1, 2, 3, 4, 5, 6, 7, 8],
1282
+ "cat_ord": list("aabbccdd"),
1283
+ "cat": list("aaaabbbb"),
1284
+ }
1285
+ )
1286
+
1287
+ input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
1288
+ input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
1289
+ result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)
1290
+
1291
+ # create expected dataframe
1292
+ cat_index = pd.CategoricalIndex(
1293
+ ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
1294
+ )
1295
+
1296
+ expected_df = DataFrame(data=exp_data, index=cat_index)
1297
+
1298
+ if "cat_ord" in expected_df:
1299
+ # ordered categorical columns should be preserved
1300
+ dtype = input_df["cat_ord"].dtype
1301
+ expected_df["cat_ord"] = expected_df["cat_ord"].astype(dtype)
1302
+
1303
+ tm.assert_frame_equal(result_df, expected_df)
1304
+
1305
+
1306
+ @pytest.mark.parametrize(
1307
+ "grp_col_dict, exp_data",
1308
+ [
1309
+ ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
1310
+ ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
1311
+ ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
1312
+ ],
1313
+ )
1314
+ def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
1315
+ # test combined aggregations on ordered categorical cols GH27800
1316
+
1317
+ # create the result dataframe
1318
+ input_df = DataFrame(
1319
+ {
1320
+ "nr": [1, 2, 3, 4, 5, 6, 7, 8],
1321
+ "cat_ord": list("aabbccdd"),
1322
+ "cat": list("aaaabbbb"),
1323
+ }
1324
+ )
1325
+
1326
+ input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
1327
+ input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
1328
+ result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)
1329
+
1330
+ # create expected dataframe
1331
+ cat_index = pd.CategoricalIndex(
1332
+ ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
1333
+ )
1334
+
1335
+ # unpack the grp_col_dict to create the multi-index tuple
1336
+ # this tuple will be used to create the expected dataframe index
1337
+ multi_index_list = []
1338
+ for k, v in grp_col_dict.items():
1339
+ if isinstance(v, list):
1340
+ multi_index_list.extend([k, value] for value in v)
1341
+ else:
1342
+ multi_index_list.append([k, v])
1343
+ multi_index = MultiIndex.from_tuples(tuple(multi_index_list))
1344
+
1345
+ expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)
1346
+ for col in expected_df.columns:
1347
+ if isinstance(col, tuple) and "cat_ord" in col:
1348
+ # ordered categorical should be preserved
1349
+ expected_df[col] = expected_df[col].astype(input_df["cat_ord"].dtype)
1350
+
1351
+ tm.assert_frame_equal(result_df, expected_df)
1352
+
1353
+
1354
+ def test_nonagg_agg():
1355
+ # GH 35490 - Single/Multiple agg of non-agg function give same results
1356
+ # TODO: agg should raise for functions that don't aggregate
1357
+ df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
1358
+ g = df.groupby("a")
1359
+
1360
+ result = g.agg(["cumsum"])
1361
+ result.columns = result.columns.droplevel(-1)
1362
+ expected = g.agg("cumsum")
1363
+
1364
+ tm.assert_frame_equal(result, expected)
1365
+
1366
+
1367
+ def test_aggregate_datetime_objects():
1368
+ # https://github.com/pandas-dev/pandas/issues/36003
1369
+ # ensure we don't raise an error but keep object dtype for out-of-bounds
1370
+ # datetimes
1371
+ df = DataFrame(
1372
+ {
1373
+ "A": ["X", "Y"],
1374
+ "B": [
1375
+ datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
1376
+ datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
1377
+ ],
1378
+ }
1379
+ )
1380
+ result = df.groupby("A").B.max()
1381
+ expected = df.set_index("A")["B"]
1382
+ tm.assert_series_equal(result, expected)
1383
+
1384
+
1385
+ def test_groupby_index_object_dtype():
1386
+ # GH 40014
1387
+ df = DataFrame({"c0": ["x", "x", "x"], "c1": ["x", "x", "y"], "p": [0, 1, 2]})
1388
+ df.index = df.index.astype("O")
1389
+ grouped = df.groupby(["c0", "c1"])
1390
+ res = grouped.p.agg(lambda x: all(x > 0))
1391
+ # Check that providing a user-defined function in agg()
1392
+ # produces the correct index shape when using an object-typed index.
1393
+ expected_index = MultiIndex.from_tuples(
1394
+ [("x", "x"), ("x", "y")], names=("c0", "c1")
1395
+ )
1396
+ expected = Series([False, True], index=expected_index, name="p")
1397
+ tm.assert_series_equal(res, expected)
1398
+
1399
+
1400
+ def test_timeseries_groupby_agg():
1401
+ # GH#43290
1402
+
1403
+ def func(ser):
1404
+ if ser.isna().all():
1405
+ return None
1406
+ return np.sum(ser)
1407
+
1408
+ df = DataFrame([1.0], index=[pd.Timestamp("2018-01-16 00:00:00+00:00")])
1409
+ res = df.groupby(lambda x: 1).agg(func)
1410
+
1411
+ expected = DataFrame([[1.0]], index=[1])
1412
+ tm.assert_frame_equal(res, expected)
1413
+
1414
+
1415
+ def test_groupby_agg_precision(any_real_numeric_dtype):
1416
+ if any_real_numeric_dtype in tm.ALL_INT_NUMPY_DTYPES:
1417
+ max_value = np.iinfo(any_real_numeric_dtype).max
1418
+ if any_real_numeric_dtype in tm.FLOAT_NUMPY_DTYPES:
1419
+ max_value = np.finfo(any_real_numeric_dtype).max
1420
+ if any_real_numeric_dtype in tm.FLOAT_EA_DTYPES:
1421
+ max_value = np.finfo(any_real_numeric_dtype.lower()).max
1422
+ if any_real_numeric_dtype in tm.ALL_INT_EA_DTYPES:
1423
+ max_value = np.iinfo(any_real_numeric_dtype.lower()).max
1424
+
1425
+ df = DataFrame(
1426
+ {
1427
+ "key1": ["a"],
1428
+ "key2": ["b"],
1429
+ "key3": pd.array([max_value], dtype=any_real_numeric_dtype),
1430
+ }
1431
+ )
1432
+ arrays = [["a"], ["b"]]
1433
+ index = MultiIndex.from_arrays(arrays, names=("key1", "key2"))
1434
+
1435
+ expected = DataFrame(
1436
+ {"key3": pd.array([max_value], dtype=any_real_numeric_dtype)}, index=index
1437
+ )
1438
+ result = df.groupby(["key1", "key2"]).agg(lambda x: x)
1439
+ tm.assert_frame_equal(result, expected)
1440
+
1441
+
1442
+ def test_groupby_aggregate_directory(reduction_func):
1443
+ # GH#32793
1444
+ if reduction_func in ["corrwith", "nth"]:
1445
+ return None
1446
+
1447
+ obj = DataFrame([[0, 1], [0, np.nan]])
1448
+
1449
+ result_reduced_series = obj.groupby(0).agg(reduction_func)
1450
+ result_reduced_frame = obj.groupby(0).agg({1: reduction_func})
1451
+
1452
+ if reduction_func in ["size", "ngroup"]:
1453
+ # names are different: None / 1
1454
+ tm.assert_series_equal(
1455
+ result_reduced_series, result_reduced_frame[1], check_names=False
1456
+ )
1457
+ else:
1458
+ tm.assert_frame_equal(result_reduced_series, result_reduced_frame)
1459
+ tm.assert_series_equal(
1460
+ result_reduced_series.dtypes, result_reduced_frame.dtypes
1461
+ )
1462
+
1463
+
1464
+ def test_group_mean_timedelta_nat():
1465
+ # GH43132
1466
+ data = Series(["1 day", "3 days", "NaT"], dtype="timedelta64[ns]")
1467
+ expected = Series(["2 days"], dtype="timedelta64[ns]", index=np.array([0]))
1468
+
1469
+ result = data.groupby([0, 0, 0]).mean()
1470
+
1471
+ tm.assert_series_equal(result, expected)
1472
+
1473
+
1474
+ @pytest.mark.parametrize(
1475
+ "input_data, expected_output",
1476
+ [
1477
+ ( # no timezone
1478
+ ["2021-01-01T00:00", "NaT", "2021-01-01T02:00"],
1479
+ ["2021-01-01T01:00"],
1480
+ ),
1481
+ ( # timezone
1482
+ ["2021-01-01T00:00-0100", "NaT", "2021-01-01T02:00-0100"],
1483
+ ["2021-01-01T01:00-0100"],
1484
+ ),
1485
+ ],
1486
+ )
1487
+ def test_group_mean_datetime64_nat(input_data, expected_output):
1488
+ # GH43132
1489
+ data = to_datetime(Series(input_data))
1490
+ expected = to_datetime(Series(expected_output, index=np.array([0])))
1491
+
1492
+ result = data.groupby([0, 0, 0]).mean()
1493
+ tm.assert_series_equal(result, expected)
1494
+
1495
+
1496
+ @pytest.mark.parametrize(
1497
+ "func, output", [("mean", [8 + 18j, 10 + 22j]), ("sum", [40 + 90j, 50 + 110j])]
1498
+ )
1499
+ def test_groupby_complex(func, output):
1500
+ # GH#43701
1501
+ data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))
1502
+ result = data.groupby(data.index % 2).agg(func)
1503
+ expected = Series(output)
1504
+ tm.assert_series_equal(result, expected)
1505
+
1506
+
1507
+ @pytest.mark.parametrize("func", ["min", "max", "var"])
1508
+ def test_groupby_complex_raises(func):
1509
+ # GH#43701
1510
+ data = Series(np.arange(20).reshape(10, 2).dot([1, 2j]))
1511
+ msg = "No matching signature found"
1512
+ with pytest.raises(TypeError, match=msg):
1513
+ data.groupby(data.index % 2).agg(func)
1514
+
1515
+
1516
+ @pytest.mark.parametrize(
1517
+ "func", [["min"], ["mean", "max"], {"b": "sum"}, {"b": "prod", "c": "median"}]
1518
+ )
1519
+ def test_multi_axis_1_raises(func):
1520
+ # GH#46995
1521
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5], "c": [6, 7, 8]})
1522
+ msg = "DataFrame.groupby with axis=1 is deprecated"
1523
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1524
+ gb = df.groupby("a", axis=1)
1525
+ with pytest.raises(NotImplementedError, match="axis other than 0 is not supported"):
1526
+ gb.agg(func)
1527
+
1528
+
1529
+ @pytest.mark.parametrize(
1530
+ "test, constant",
1531
+ [
1532
+ ([[20, "A"], [20, "B"], [10, "C"]], {0: [10, 20], 1: ["C", ["A", "B"]]}),
1533
+ ([[20, "A"], [20, "B"], [30, "C"]], {0: [20, 30], 1: [["A", "B"], "C"]}),
1534
+ ([["a", 1], ["a", 1], ["b", 2], ["b", 3]], {0: ["a", "b"], 1: [1, [2, 3]]}),
1535
+ pytest.param(
1536
+ [["a", 1], ["a", 2], ["b", 3], ["b", 3]],
1537
+ {0: ["a", "b"], 1: [[1, 2], 3]},
1538
+ marks=pytest.mark.xfail,
1539
+ ),
1540
+ ],
1541
+ )
1542
+ def test_agg_of_mode_list(test, constant):
1543
+ # GH#25581
1544
+ df1 = DataFrame(test)
1545
+ result = df1.groupby(0).agg(Series.mode)
1546
+ # Mode usually only returns 1 value, but can return a list in the case of a tie.
1547
+
1548
+ expected = DataFrame(constant)
1549
+ expected = expected.set_index(0)
1550
+
1551
+ tm.assert_frame_equal(result, expected)
1552
+
1553
+
1554
+ def test_dataframe_groupy_agg_list_like_func_with_args():
1555
+ # GH#50624
1556
+ df = DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})
1557
+ gb = df.groupby("y")
1558
+
1559
+ def foo1(x, a=1, c=0):
1560
+ return x.sum() + a + c
1561
+
1562
+ def foo2(x, b=2, c=0):
1563
+ return x.sum() + b + c
1564
+
1565
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
1566
+ with pytest.raises(TypeError, match=msg):
1567
+ gb.agg([foo1, foo2], 3, b=3, c=4)
1568
+
1569
+ result = gb.agg([foo1, foo2], 3, c=4)
1570
+ expected = DataFrame(
1571
+ [[8, 8], [9, 9], [10, 10]],
1572
+ index=Index(["a", "b", "c"], name="y"),
1573
+ columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
1574
+ )
1575
+ tm.assert_frame_equal(result, expected)
1576
+
1577
+
1578
+ def test_series_groupy_agg_list_like_func_with_args():
1579
+ # GH#50624
1580
+ s = Series([1, 2, 3])
1581
+ sgb = s.groupby(s)
1582
+
1583
+ def foo1(x, a=1, c=0):
1584
+ return x.sum() + a + c
1585
+
1586
+ def foo2(x, b=2, c=0):
1587
+ return x.sum() + b + c
1588
+
1589
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
1590
+ with pytest.raises(TypeError, match=msg):
1591
+ sgb.agg([foo1, foo2], 3, b=3, c=4)
1592
+
1593
+ result = sgb.agg([foo1, foo2], 3, c=4)
1594
+ expected = DataFrame(
1595
+ [[8, 8], [9, 9], [10, 10]], index=Index([1, 2, 3]), columns=["foo1", "foo2"]
1596
+ )
1597
+ tm.assert_frame_equal(result, expected)
1598
+
1599
+
1600
+ def test_agg_groupings_selection():
1601
+ # GH#51186 - a selected grouping should be in the output of agg
1602
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 3, 4], "c": [5, 6, 7]})
1603
+ gb = df.groupby(["a", "b"])
1604
+ selected_gb = gb[["b", "c"]]
1605
+ result = selected_gb.agg(lambda x: x.sum())
1606
+ index = MultiIndex(
1607
+ levels=[[1, 2], [3, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]
1608
+ )
1609
+ expected = DataFrame({"b": [6, 4], "c": [11, 7]}, index=index)
1610
+ tm.assert_frame_equal(result, expected)
1611
+
1612
+
1613
+ def test_agg_multiple_with_as_index_false_subset_to_a_single_column():
1614
+ # GH#50724
1615
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
1616
+ gb = df.groupby("a", as_index=False)["b"]
1617
+ result = gb.agg(["sum", "mean"])
1618
+ expected = DataFrame({"a": [1, 2], "sum": [7, 5], "mean": [3.5, 5.0]})
1619
+ tm.assert_frame_equal(result, expected)
1620
+
1621
+
1622
+ def test_agg_with_as_index_false_with_list():
1623
+ # GH#52849
1624
+ df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]})
1625
+ gb = df.groupby(by=["a1", "a2"], as_index=False)
1626
+ result = gb.agg(["sum"])
1627
+
1628
+ expected = DataFrame(
1629
+ data=[[0, 2, 4], [0, 3, 5], [1, 3, 6]],
1630
+ columns=MultiIndex.from_tuples([("a1", ""), ("a2", ""), ("b", "sum")]),
1631
+ )
1632
+ tm.assert_frame_equal(result, expected)
1633
+
1634
+
1635
+ def test_groupby_agg_extension_timedelta_cumsum_with_named_aggregation():
1636
+ # GH#41720
1637
+ expected = DataFrame(
1638
+ {
1639
+ "td": {
1640
+ 0: pd.Timedelta("0 days 01:00:00"),
1641
+ 1: pd.Timedelta("0 days 01:15:00"),
1642
+ 2: pd.Timedelta("0 days 01:15:00"),
1643
+ }
1644
+ }
1645
+ )
1646
+ df = DataFrame(
1647
+ {
1648
+ "td": Series(
1649
+ ["0 days 01:00:00", "0 days 00:15:00", "0 days 01:15:00"],
1650
+ dtype="timedelta64[ns]",
1651
+ ),
1652
+ "grps": ["a", "a", "b"],
1653
+ }
1654
+ )
1655
+ gb = df.groupby("grps")
1656
+ result = gb.agg(td=("td", "cumsum"))
1657
+ tm.assert_frame_equal(result, expected)
1658
+
1659
+
1660
+ def test_groupby_aggregation_empty_group():
1661
+ # https://github.com/pandas-dev/pandas/issues/18869
1662
+ def func(x):
1663
+ if len(x) == 0:
1664
+ raise ValueError("length must not be 0")
1665
+ return len(x)
1666
+
1667
+ df = DataFrame(
1668
+ {"A": pd.Categorical(["a", "a"], categories=["a", "b", "c"]), "B": [1, 1]}
1669
+ )
1670
+ msg = "length must not be 0"
1671
+ with pytest.raises(ValueError, match=msg):
1672
+ df.groupby("A", observed=False).agg(func)
venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_cython.py ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ test cython .agg behavior
3
+ """
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from pandas.core.dtypes.common import (
9
+ is_float_dtype,
10
+ is_integer_dtype,
11
+ )
12
+
13
+ import pandas as pd
14
+ from pandas import (
15
+ DataFrame,
16
+ Index,
17
+ NaT,
18
+ Series,
19
+ Timedelta,
20
+ Timestamp,
21
+ bdate_range,
22
+ )
23
+ import pandas._testing as tm
24
+ import pandas.core.common as com
25
+
26
+
27
+ @pytest.mark.parametrize(
28
+ "op_name",
29
+ [
30
+ "count",
31
+ "sum",
32
+ "std",
33
+ "var",
34
+ "sem",
35
+ "mean",
36
+ pytest.param(
37
+ "median",
38
+ # ignore mean of empty slice
39
+ # and all-NaN
40
+ marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")],
41
+ ),
42
+ "prod",
43
+ "min",
44
+ "max",
45
+ ],
46
+ )
47
+ def test_cythonized_aggers(op_name):
48
+ data = {
49
+ "A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan],
50
+ "B": ["A", "B"] * 6,
51
+ "C": np.random.default_rng(2).standard_normal(12),
52
+ }
53
+ df = DataFrame(data)
54
+ df.loc[2:10:2, "C"] = np.nan
55
+
56
+ op = lambda x: getattr(x, op_name)()
57
+
58
+ # single column
59
+ grouped = df.drop(["B"], axis=1).groupby("A")
60
+ exp = {cat: op(group["C"]) for cat, group in grouped}
61
+ exp = DataFrame({"C": exp})
62
+ exp.index.name = "A"
63
+ result = op(grouped)
64
+ tm.assert_frame_equal(result, exp)
65
+
66
+ # multiple columns
67
+ grouped = df.groupby(["A", "B"])
68
+ expd = {}
69
+ for (cat1, cat2), group in grouped:
70
+ expd.setdefault(cat1, {})[cat2] = op(group["C"])
71
+ exp = DataFrame(expd).T.stack(future_stack=True)
72
+ exp.index.names = ["A", "B"]
73
+ exp.name = "C"
74
+
75
+ result = op(grouped)["C"]
76
+ if op_name in ["sum", "prod"]:
77
+ tm.assert_series_equal(result, exp)
78
+
79
+
80
+ def test_cython_agg_boolean():
81
+ frame = DataFrame(
82
+ {
83
+ "a": np.random.default_rng(2).integers(0, 5, 50),
84
+ "b": np.random.default_rng(2).integers(0, 2, 50).astype("bool"),
85
+ }
86
+ )
87
+ result = frame.groupby("a")["b"].mean()
88
+ msg = "using SeriesGroupBy.mean"
89
+ with tm.assert_produces_warning(FutureWarning, match=msg):
90
+ # GH#53425
91
+ expected = frame.groupby("a")["b"].agg(np.mean)
92
+
93
+ tm.assert_series_equal(result, expected)
94
+
95
+
96
+ def test_cython_agg_nothing_to_agg():
97
+ frame = DataFrame(
98
+ {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25}
99
+ )
100
+
101
+ msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
102
+ with pytest.raises(TypeError, match=msg):
103
+ frame.groupby("a")["b"].mean(numeric_only=True)
104
+
105
+ frame = DataFrame(
106
+ {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25}
107
+ )
108
+
109
+ result = frame[["b"]].groupby(frame["a"]).mean(numeric_only=True)
110
+ expected = DataFrame(
111
+ [], index=frame["a"].sort_values().drop_duplicates(), columns=[]
112
+ )
113
+ tm.assert_frame_equal(result, expected)
114
+
115
+
116
+ def test_cython_agg_nothing_to_agg_with_dates():
117
+ frame = DataFrame(
118
+ {
119
+ "a": np.random.default_rng(2).integers(0, 5, 50),
120
+ "b": ["foo", "bar"] * 25,
121
+ "dates": pd.date_range("now", periods=50, freq="min"),
122
+ }
123
+ )
124
+ msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
125
+ with pytest.raises(TypeError, match=msg):
126
+ frame.groupby("b").dates.mean(numeric_only=True)
127
+
128
+
129
+ def test_cython_agg_frame_columns():
130
+ # #2113
131
+ df = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]})
132
+
133
+ msg = "DataFrame.groupby with axis=1 is deprecated"
134
+ with tm.assert_produces_warning(FutureWarning, match=msg):
135
+ df.groupby(level=0, axis="columns").mean()
136
+ with tm.assert_produces_warning(FutureWarning, match=msg):
137
+ df.groupby(level=0, axis="columns").mean()
138
+ with tm.assert_produces_warning(FutureWarning, match=msg):
139
+ df.groupby(level=0, axis="columns").mean()
140
+ with tm.assert_produces_warning(FutureWarning, match=msg):
141
+ df.groupby(level=0, axis="columns").mean()
142
+
143
+
144
+ def test_cython_agg_return_dict():
145
+ # GH 16741
146
+ df = DataFrame(
147
+ {
148
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
149
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
150
+ "C": np.random.default_rng(2).standard_normal(8),
151
+ "D": np.random.default_rng(2).standard_normal(8),
152
+ }
153
+ )
154
+
155
+ ts = df.groupby("A")["B"].agg(lambda x: x.value_counts().to_dict())
156
+ expected = Series(
157
+ [{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}],
158
+ index=Index(["bar", "foo"], name="A"),
159
+ name="B",
160
+ )
161
+ tm.assert_series_equal(ts, expected)
162
+
163
+
164
+ def test_cython_fail_agg():
165
+ dr = bdate_range("1/1/2000", periods=50)
166
+ ts = Series(["A", "B", "C", "D", "E"] * 10, index=dr)
167
+
168
+ grouped = ts.groupby(lambda x: x.month)
169
+ summed = grouped.sum()
170
+ msg = "using SeriesGroupBy.sum"
171
+ with tm.assert_produces_warning(FutureWarning, match=msg):
172
+ # GH#53425
173
+ expected = grouped.agg(np.sum)
174
+ tm.assert_series_equal(summed, expected)
175
+
176
+
177
+ @pytest.mark.parametrize(
178
+ "op, targop",
179
+ [
180
+ ("mean", np.mean),
181
+ ("median", np.median),
182
+ ("var", np.var),
183
+ ("sum", np.sum),
184
+ ("prod", np.prod),
185
+ ("min", np.min),
186
+ ("max", np.max),
187
+ ("first", lambda x: x.iloc[0]),
188
+ ("last", lambda x: x.iloc[-1]),
189
+ ],
190
+ )
191
+ def test__cython_agg_general(op, targop):
192
+ df = DataFrame(np.random.default_rng(2).standard_normal(1000))
193
+ labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)
194
+
195
+ result = df.groupby(labels)._cython_agg_general(op, alt=None, numeric_only=True)
196
+ warn = FutureWarning if targop in com._cython_table else None
197
+ msg = f"using DataFrameGroupBy.{op}"
198
+ with tm.assert_produces_warning(warn, match=msg):
199
+ # GH#53425
200
+ expected = df.groupby(labels).agg(targop)
201
+ tm.assert_frame_equal(result, expected)
202
+
203
+
204
+ @pytest.mark.parametrize(
205
+ "op, targop",
206
+ [
207
+ ("mean", np.mean),
208
+ ("median", lambda x: np.median(x) if len(x) > 0 else np.nan),
209
+ ("var", lambda x: np.var(x, ddof=1)),
210
+ ("min", np.min),
211
+ ("max", np.max),
212
+ ],
213
+ )
214
+ def test_cython_agg_empty_buckets(op, targop, observed):
215
+ df = DataFrame([11, 12, 13])
216
+ grps = range(0, 55, 5)
217
+
218
+ # calling _cython_agg_general directly, instead of via the user API
219
+ # which sets different values for min_count, so do that here.
220
+ g = df.groupby(pd.cut(df[0], grps), observed=observed)
221
+ result = g._cython_agg_general(op, alt=None, numeric_only=True)
222
+
223
+ g = df.groupby(pd.cut(df[0], grps), observed=observed)
224
+ expected = g.agg(lambda x: targop(x))
225
+ tm.assert_frame_equal(result, expected)
226
+
227
+
228
+ def test_cython_agg_empty_buckets_nanops(observed):
229
+ # GH-18869 can't call nanops on empty groups, so hardcode expected
230
+ # for these
231
+ df = DataFrame([11, 12, 13], columns=["a"])
232
+ grps = np.arange(0, 25, 5, dtype=int)
233
+ # add / sum
234
+ result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
235
+ "sum", alt=None, numeric_only=True
236
+ )
237
+ intervals = pd.interval_range(0, 20, freq=5)
238
+ expected = DataFrame(
239
+ {"a": [0, 0, 36, 0]},
240
+ index=pd.CategoricalIndex(intervals, name="a", ordered=True),
241
+ )
242
+ if observed:
243
+ expected = expected[expected.a != 0]
244
+
245
+ tm.assert_frame_equal(result, expected)
246
+
247
+ # prod
248
+ result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
249
+ "prod", alt=None, numeric_only=True
250
+ )
251
+ expected = DataFrame(
252
+ {"a": [1, 1, 1716, 1]},
253
+ index=pd.CategoricalIndex(intervals, name="a", ordered=True),
254
+ )
255
+ if observed:
256
+ expected = expected[expected.a != 1]
257
+
258
+ tm.assert_frame_equal(result, expected)
259
+
260
+
261
+ @pytest.mark.parametrize("op", ["first", "last", "max", "min"])
262
+ @pytest.mark.parametrize(
263
+ "data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")]
264
+ )
265
+ def test_cython_with_timestamp_and_nat(op, data):
266
+ # https://github.com/pandas-dev/pandas/issues/19526
267
+ df = DataFrame({"a": [0, 1], "b": [data, NaT]})
268
+ index = Index([0, 1], name="a")
269
+
270
+ # We will group by a and test the cython aggregations
271
+ expected = DataFrame({"b": [data, NaT]}, index=index)
272
+
273
+ result = df.groupby("a").aggregate(op)
274
+ tm.assert_frame_equal(expected, result)
275
+
276
+
277
+ @pytest.mark.parametrize(
278
+ "agg",
279
+ [
280
+ "min",
281
+ "max",
282
+ "count",
283
+ "sum",
284
+ "prod",
285
+ "var",
286
+ "mean",
287
+ "median",
288
+ "ohlc",
289
+ "cumprod",
290
+ "cumsum",
291
+ "shift",
292
+ "any",
293
+ "all",
294
+ "quantile",
295
+ "first",
296
+ "last",
297
+ "rank",
298
+ "cummin",
299
+ "cummax",
300
+ ],
301
+ )
302
+ def test_read_only_buffer_source_agg(agg):
303
+ # https://github.com/pandas-dev/pandas/issues/36014
304
+ df = DataFrame(
305
+ {
306
+ "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0],
307
+ "species": ["setosa", "setosa", "setosa", "setosa", "setosa"],
308
+ }
309
+ )
310
+ df._mgr.arrays[0].flags.writeable = False
311
+
312
+ result = df.groupby(["species"]).agg({"sepal_length": agg})
313
+ expected = df.copy().groupby(["species"]).agg({"sepal_length": agg})
314
+
315
+ tm.assert_equal(result, expected)
316
+
317
+
318
+ @pytest.mark.parametrize(
319
+ "op_name",
320
+ [
321
+ "count",
322
+ "sum",
323
+ "std",
324
+ "var",
325
+ "sem",
326
+ "mean",
327
+ "median",
328
+ "prod",
329
+ "min",
330
+ "max",
331
+ ],
332
+ )
333
+ def test_cython_agg_nullable_int(op_name):
334
+ # ensure that the cython-based aggregations don't fail for nullable dtype
335
+ # (eg https://github.com/pandas-dev/pandas/issues/37415)
336
+ df = DataFrame(
337
+ {
338
+ "A": ["A", "B"] * 5,
339
+ "B": pd.array([1, 2, 3, 4, 5, 6, 7, 8, 9, pd.NA], dtype="Int64"),
340
+ }
341
+ )
342
+ result = getattr(df.groupby("A")["B"], op_name)()
343
+ df2 = df.assign(B=df["B"].astype("float64"))
344
+ expected = getattr(df2.groupby("A")["B"], op_name)()
345
+ if op_name in ("mean", "median"):
346
+ convert_integer = False
347
+ else:
348
+ convert_integer = True
349
+ expected = expected.convert_dtypes(convert_integer=convert_integer)
350
+ tm.assert_series_equal(result, expected)
351
+
352
+
353
+ @pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])
354
+ def test_count_masked_returns_masked_dtype(dtype):
355
+ df = DataFrame(
356
+ {
357
+ "A": [1, 1],
358
+ "B": pd.array([1, pd.NA], dtype=dtype),
359
+ "C": pd.array([1, 1], dtype=dtype),
360
+ }
361
+ )
362
+ result = df.groupby("A").count()
363
+ expected = DataFrame(
364
+ [[1, 2]], index=Index([1], name="A"), columns=["B", "C"], dtype="Int64"
365
+ )
366
+ tm.assert_frame_equal(result, expected)
367
+
368
+
369
+ @pytest.mark.parametrize("with_na", [True, False])
370
+ @pytest.mark.parametrize(
371
+ "op_name, action",
372
+ [
373
+ # ("count", "always_int"),
374
+ ("sum", "large_int"),
375
+ # ("std", "always_float"),
376
+ ("var", "always_float"),
377
+ # ("sem", "always_float"),
378
+ ("mean", "always_float"),
379
+ ("median", "always_float"),
380
+ ("prod", "large_int"),
381
+ ("min", "preserve"),
382
+ ("max", "preserve"),
383
+ ("first", "preserve"),
384
+ ("last", "preserve"),
385
+ ],
386
+ )
387
+ @pytest.mark.parametrize(
388
+ "data",
389
+ [
390
+ pd.array([1, 2, 3, 4], dtype="Int64"),
391
+ pd.array([1, 2, 3, 4], dtype="Int8"),
392
+ pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float32"),
393
+ pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64"),
394
+ pd.array([True, True, False, False], dtype="boolean"),
395
+ ],
396
+ )
397
+ def test_cython_agg_EA_known_dtypes(data, op_name, action, with_na):
398
+ if with_na:
399
+ data[3] = pd.NA
400
+
401
+ df = DataFrame({"key": ["a", "a", "b", "b"], "col": data})
402
+ grouped = df.groupby("key")
403
+
404
+ if action == "always_int":
405
+ # always Int64
406
+ expected_dtype = pd.Int64Dtype()
407
+ elif action == "large_int":
408
+ # for any int/bool use Int64, for float preserve dtype
409
+ if is_float_dtype(data.dtype):
410
+ expected_dtype = data.dtype
411
+ elif is_integer_dtype(data.dtype):
412
+ # match the numpy dtype we'd get with the non-nullable analogue
413
+ expected_dtype = data.dtype
414
+ else:
415
+ expected_dtype = pd.Int64Dtype()
416
+ elif action == "always_float":
417
+ # for any int/bool use Float64, for float preserve dtype
418
+ if is_float_dtype(data.dtype):
419
+ expected_dtype = data.dtype
420
+ else:
421
+ expected_dtype = pd.Float64Dtype()
422
+ elif action == "preserve":
423
+ expected_dtype = data.dtype
424
+
425
+ result = getattr(grouped, op_name)()
426
+ assert result["col"].dtype == expected_dtype
427
+
428
+ result = grouped.aggregate(op_name)
429
+ assert result["col"].dtype == expected_dtype
430
+
431
+ result = getattr(grouped["col"], op_name)()
432
+ assert result.dtype == expected_dtype
433
+
434
+ result = grouped["col"].aggregate(op_name)
435
+ assert result.dtype == expected_dtype
venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_numba.py ADDED
@@ -0,0 +1,392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.errors import NumbaUtilError
5
+
6
+ from pandas import (
7
+ DataFrame,
8
+ Index,
9
+ NamedAgg,
10
+ Series,
11
+ option_context,
12
+ )
13
+ import pandas._testing as tm
14
+
15
+ pytestmark = pytest.mark.single_cpu
16
+
17
+
18
+ def test_correct_function_signature():
19
+ pytest.importorskip("numba")
20
+
21
+ def incorrect_function(x):
22
+ return sum(x) * 2.7
23
+
24
+ data = DataFrame(
25
+ {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
26
+ columns=["key", "data"],
27
+ )
28
+ with pytest.raises(NumbaUtilError, match="The first 2"):
29
+ data.groupby("key").agg(incorrect_function, engine="numba")
30
+
31
+ with pytest.raises(NumbaUtilError, match="The first 2"):
32
+ data.groupby("key")["data"].agg(incorrect_function, engine="numba")
33
+
34
+
35
+ def test_check_nopython_kwargs():
36
+ pytest.importorskip("numba")
37
+
38
+ def incorrect_function(values, index):
39
+ return sum(values) * 2.7
40
+
41
+ data = DataFrame(
42
+ {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
43
+ columns=["key", "data"],
44
+ )
45
+ with pytest.raises(NumbaUtilError, match="numba does not support"):
46
+ data.groupby("key").agg(incorrect_function, engine="numba", a=1)
47
+
48
+ with pytest.raises(NumbaUtilError, match="numba does not support"):
49
+ data.groupby("key")["data"].agg(incorrect_function, engine="numba", a=1)
50
+
51
+
52
+ @pytest.mark.filterwarnings("ignore")
53
+ # Filter warnings when parallel=True and the function can't be parallelized by Numba
54
+ @pytest.mark.parametrize("jit", [True, False])
55
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
56
+ @pytest.mark.parametrize("as_index", [True, False])
57
+ def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
58
+ pytest.importorskip("numba")
59
+
60
+ def func_numba(values, index):
61
+ return np.mean(values) * 2.7
62
+
63
+ if jit:
64
+ # Test accepted jitted functions
65
+ import numba
66
+
67
+ func_numba = numba.jit(func_numba)
68
+
69
+ data = DataFrame(
70
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
71
+ )
72
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
73
+ grouped = data.groupby(0, as_index=as_index)
74
+ if pandas_obj == "Series":
75
+ grouped = grouped[1]
76
+
77
+ result = grouped.agg(func_numba, engine="numba", engine_kwargs=engine_kwargs)
78
+ expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")
79
+
80
+ tm.assert_equal(result, expected)
81
+
82
+
83
+ @pytest.mark.filterwarnings("ignore")
84
+ # Filter warnings when parallel=True and the function can't be parallelized by Numba
85
+ @pytest.mark.parametrize("jit", [True, False])
86
+ @pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
87
+ def test_cache(jit, pandas_obj, nogil, parallel, nopython):
88
+ # Test that the functions are cached correctly if we switch functions
89
+ pytest.importorskip("numba")
90
+
91
+ def func_1(values, index):
92
+ return np.mean(values) - 3.4
93
+
94
+ def func_2(values, index):
95
+ return np.mean(values) * 2.7
96
+
97
+ if jit:
98
+ import numba
99
+
100
+ func_1 = numba.jit(func_1)
101
+ func_2 = numba.jit(func_2)
102
+
103
+ data = DataFrame(
104
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
105
+ )
106
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
107
+ grouped = data.groupby(0)
108
+ if pandas_obj == "Series":
109
+ grouped = grouped[1]
110
+
111
+ result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)
112
+ expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")
113
+ tm.assert_equal(result, expected)
114
+
115
+ # Add func_2 to the cache
116
+ result = grouped.agg(func_2, engine="numba", engine_kwargs=engine_kwargs)
117
+ expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython")
118
+ tm.assert_equal(result, expected)
119
+
120
+ # Retest func_1 which should use the cache
121
+ result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs)
122
+ expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython")
123
+ tm.assert_equal(result, expected)
124
+
125
+
126
+ def test_use_global_config():
127
+ pytest.importorskip("numba")
128
+
129
+ def func_1(values, index):
130
+ return np.mean(values) - 3.4
131
+
132
+ data = DataFrame(
133
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
134
+ )
135
+ grouped = data.groupby(0)
136
+ expected = grouped.agg(func_1, engine="numba")
137
+ with option_context("compute.use_numba", True):
138
+ result = grouped.agg(func_1, engine=None)
139
+ tm.assert_frame_equal(expected, result)
140
+
141
+
142
+ @pytest.mark.parametrize(
143
+ "agg_kwargs",
144
+ [
145
+ {"func": ["min", "max"]},
146
+ {"func": "min"},
147
+ {"func": {1: ["min", "max"], 2: "sum"}},
148
+ {"bmin": NamedAgg(column=1, aggfunc="min")},
149
+ ],
150
+ )
151
+ def test_multifunc_numba_vs_cython_frame(agg_kwargs):
152
+ pytest.importorskip("numba")
153
+ data = DataFrame(
154
+ {
155
+ 0: ["a", "a", "b", "b", "a"],
156
+ 1: [1.0, 2.0, 3.0, 4.0, 5.0],
157
+ 2: [1, 2, 3, 4, 5],
158
+ },
159
+ columns=[0, 1, 2],
160
+ )
161
+ grouped = data.groupby(0)
162
+ result = grouped.agg(**agg_kwargs, engine="numba")
163
+ expected = grouped.agg(**agg_kwargs, engine="cython")
164
+ tm.assert_frame_equal(result, expected)
165
+
166
+
167
+ @pytest.mark.parametrize(
168
+ "agg_kwargs,expected_func",
169
+ [
170
+ ({"func": lambda values, index: values.sum()}, "sum"),
171
+ # FIXME
172
+ pytest.param(
173
+ {
174
+ "func": [
175
+ lambda values, index: values.sum(),
176
+ lambda values, index: values.min(),
177
+ ]
178
+ },
179
+ ["sum", "min"],
180
+ marks=pytest.mark.xfail(
181
+ reason="This doesn't work yet! Fails in nopython pipeline!"
182
+ ),
183
+ ),
184
+ ],
185
+ )
186
+ def test_multifunc_numba_udf_frame(agg_kwargs, expected_func):
187
+ pytest.importorskip("numba")
188
+ data = DataFrame(
189
+ {
190
+ 0: ["a", "a", "b", "b", "a"],
191
+ 1: [1.0, 2.0, 3.0, 4.0, 5.0],
192
+ 2: [1, 2, 3, 4, 5],
193
+ },
194
+ columns=[0, 1, 2],
195
+ )
196
+ grouped = data.groupby(0)
197
+ result = grouped.agg(**agg_kwargs, engine="numba")
198
+ expected = grouped.agg(expected_func, engine="cython")
199
+ # check_dtype can be removed if GH 44952 is addressed
200
+ # Currently, UDFs still always return float64 while reductions can preserve dtype
201
+ tm.assert_frame_equal(result, expected, check_dtype=False)
202
+
203
+
204
+ @pytest.mark.parametrize(
205
+ "agg_kwargs",
206
+ [{"func": ["min", "max"]}, {"func": "min"}, {"min_val": "min", "max_val": "max"}],
207
+ )
208
+ def test_multifunc_numba_vs_cython_series(agg_kwargs):
209
+ pytest.importorskip("numba")
210
+ labels = ["a", "a", "b", "b", "a"]
211
+ data = Series([1.0, 2.0, 3.0, 4.0, 5.0])
212
+ grouped = data.groupby(labels)
213
+ agg_kwargs["engine"] = "numba"
214
+ result = grouped.agg(**agg_kwargs)
215
+ agg_kwargs["engine"] = "cython"
216
+ expected = grouped.agg(**agg_kwargs)
217
+ if isinstance(expected, DataFrame):
218
+ tm.assert_frame_equal(result, expected)
219
+ else:
220
+ tm.assert_series_equal(result, expected)
221
+
222
+
223
+ @pytest.mark.single_cpu
224
+ @pytest.mark.parametrize(
225
+ "data,agg_kwargs",
226
+ [
227
+ (Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": ["min", "max"]}),
228
+ (Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": "min"}),
229
+ (
230
+ DataFrame(
231
+ {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
232
+ ),
233
+ {"func": ["min", "max"]},
234
+ ),
235
+ (
236
+ DataFrame(
237
+ {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
238
+ ),
239
+ {"func": "min"},
240
+ ),
241
+ (
242
+ DataFrame(
243
+ {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
244
+ ),
245
+ {"func": {1: ["min", "max"], 2: "sum"}},
246
+ ),
247
+ (
248
+ DataFrame(
249
+ {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2]
250
+ ),
251
+ {"min_col": NamedAgg(column=1, aggfunc="min")},
252
+ ),
253
+ ],
254
+ )
255
+ def test_multifunc_numba_kwarg_propagation(data, agg_kwargs):
256
+ pytest.importorskip("numba")
257
+ labels = ["a", "a", "b", "b", "a"]
258
+ grouped = data.groupby(labels)
259
+ result = grouped.agg(**agg_kwargs, engine="numba", engine_kwargs={"parallel": True})
260
+ expected = grouped.agg(**agg_kwargs, engine="numba")
261
+ if isinstance(expected, DataFrame):
262
+ tm.assert_frame_equal(result, expected)
263
+ else:
264
+ tm.assert_series_equal(result, expected)
265
+
266
+
267
+ def test_args_not_cached():
268
+ # GH 41647
269
+ pytest.importorskip("numba")
270
+
271
+ def sum_last(values, index, n):
272
+ return values[-n:].sum()
273
+
274
+ df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]})
275
+ grouped_x = df.groupby("id")["x"]
276
+ result = grouped_x.agg(sum_last, 1, engine="numba")
277
+ expected = Series([1.0] * 2, name="x", index=Index([0, 1], name="id"))
278
+ tm.assert_series_equal(result, expected)
279
+
280
+ result = grouped_x.agg(sum_last, 2, engine="numba")
281
+ expected = Series([2.0] * 2, name="x", index=Index([0, 1], name="id"))
282
+ tm.assert_series_equal(result, expected)
283
+
284
+
285
+ def test_index_data_correctly_passed():
286
+ # GH 43133
287
+ pytest.importorskip("numba")
288
+
289
+ def f(values, index):
290
+ return np.mean(index)
291
+
292
+ df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3])
293
+ result = df.groupby("group").aggregate(f, engine="numba")
294
+ expected = DataFrame(
295
+ [-1.5, -3.0], columns=["v"], index=Index(["A", "B"], name="group")
296
+ )
297
+ tm.assert_frame_equal(result, expected)
298
+
299
+
300
+ def test_engine_kwargs_not_cached():
301
+ # If the user passes a different set of engine_kwargs don't return the same
302
+ # jitted function
303
+ pytest.importorskip("numba")
304
+ nogil = True
305
+ parallel = False
306
+ nopython = True
307
+
308
+ def func_kwargs(values, index):
309
+ return nogil + parallel + nopython
310
+
311
+ engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
312
+ df = DataFrame({"value": [0, 0, 0]})
313
+ result = df.groupby(level=0).aggregate(
314
+ func_kwargs, engine="numba", engine_kwargs=engine_kwargs
315
+ )
316
+ expected = DataFrame({"value": [2.0, 2.0, 2.0]})
317
+ tm.assert_frame_equal(result, expected)
318
+
319
+ nogil = False
320
+ engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
321
+ result = df.groupby(level=0).aggregate(
322
+ func_kwargs, engine="numba", engine_kwargs=engine_kwargs
323
+ )
324
+ expected = DataFrame({"value": [1.0, 1.0, 1.0]})
325
+ tm.assert_frame_equal(result, expected)
326
+
327
+
328
+ @pytest.mark.filterwarnings("ignore")
329
+ def test_multiindex_one_key(nogil, parallel, nopython):
330
+ pytest.importorskip("numba")
331
+
332
+ def numba_func(values, index):
333
+ return 1
334
+
335
+ df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])
336
+ engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
337
+ result = df.groupby("A").agg(
338
+ numba_func, engine="numba", engine_kwargs=engine_kwargs
339
+ )
340
+ expected = DataFrame([1.0], index=Index([1], name="A"), columns=["C"])
341
+ tm.assert_frame_equal(result, expected)
342
+
343
+
344
+ def test_multiindex_multi_key_not_supported(nogil, parallel, nopython):
345
+ pytest.importorskip("numba")
346
+
347
+ def numba_func(values, index):
348
+ return 1
349
+
350
+ df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"])
351
+ engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
352
+ with pytest.raises(NotImplementedError, match="more than 1 grouping labels"):
353
+ df.groupby(["A", "B"]).agg(
354
+ numba_func, engine="numba", engine_kwargs=engine_kwargs
355
+ )
356
+
357
+
358
+ def test_multilabel_numba_vs_cython(numba_supported_reductions):
359
+ pytest.importorskip("numba")
360
+ reduction, kwargs = numba_supported_reductions
361
+ df = DataFrame(
362
+ {
363
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
364
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
365
+ "C": np.random.default_rng(2).standard_normal(8),
366
+ "D": np.random.default_rng(2).standard_normal(8),
367
+ }
368
+ )
369
+ gb = df.groupby(["A", "B"])
370
+ res_agg = gb.agg(reduction, engine="numba", **kwargs)
371
+ expected_agg = gb.agg(reduction, engine="cython", **kwargs)
372
+ tm.assert_frame_equal(res_agg, expected_agg)
373
+ # Test that calling the aggregation directly also works
374
+ direct_res = getattr(gb, reduction)(engine="numba", **kwargs)
375
+ direct_expected = getattr(gb, reduction)(engine="cython", **kwargs)
376
+ tm.assert_frame_equal(direct_res, direct_expected)
377
+
378
+
379
+ def test_multilabel_udf_numba_vs_cython():
380
+ pytest.importorskip("numba")
381
+ df = DataFrame(
382
+ {
383
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
384
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
385
+ "C": np.random.default_rng(2).standard_normal(8),
386
+ "D": np.random.default_rng(2).standard_normal(8),
387
+ }
388
+ )
389
+ gb = df.groupby(["A", "B"])
390
+ result = gb.agg(lambda values, index: values.min(), engine="numba")
391
+ expected = gb.agg(lambda x: x.min(), engine="cython")
392
+ tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/aggregate/test_other.py ADDED
@@ -0,0 +1,675 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ test all other .agg behavior
3
+ """
4
+
5
+ import datetime as dt
6
+ from functools import partial
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas.errors import SpecificationError
12
+
13
+ import pandas as pd
14
+ from pandas import (
15
+ DataFrame,
16
+ Index,
17
+ MultiIndex,
18
+ PeriodIndex,
19
+ Series,
20
+ date_range,
21
+ period_range,
22
+ )
23
+ import pandas._testing as tm
24
+
25
+ from pandas.io.formats.printing import pprint_thing
26
+
27
+
28
+ def test_agg_partial_failure_raises():
29
+ # GH#43741
30
+
31
+ df = DataFrame(
32
+ {
33
+ "data1": np.random.default_rng(2).standard_normal(5),
34
+ "data2": np.random.default_rng(2).standard_normal(5),
35
+ "key1": ["a", "a", "b", "b", "a"],
36
+ "key2": ["one", "two", "one", "two", "one"],
37
+ }
38
+ )
39
+ grouped = df.groupby("key1")
40
+
41
+ def peak_to_peak(arr):
42
+ return arr.max() - arr.min()
43
+
44
+ with pytest.raises(TypeError, match="unsupported operand type"):
45
+ grouped.agg([peak_to_peak])
46
+
47
+ with pytest.raises(TypeError, match="unsupported operand type"):
48
+ grouped.agg(peak_to_peak)
49
+
50
+
51
+ def test_agg_datetimes_mixed():
52
+ data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]
53
+
54
+ df1 = DataFrame(
55
+ {
56
+ "key": [x[0] for x in data],
57
+ "date": [x[1] for x in data],
58
+ "value": [x[2] for x in data],
59
+ }
60
+ )
61
+
62
+ data = [
63
+ [
64
+ row[0],
65
+ (dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),
66
+ row[2],
67
+ ]
68
+ for row in data
69
+ ]
70
+
71
+ df2 = DataFrame(
72
+ {
73
+ "key": [x[0] for x in data],
74
+ "date": [x[1] for x in data],
75
+ "value": [x[2] for x in data],
76
+ }
77
+ )
78
+
79
+ df1["weights"] = df1["value"] / df1["value"].sum()
80
+ gb1 = df1.groupby("date").aggregate("sum")
81
+
82
+ df2["weights"] = df1["value"] / df1["value"].sum()
83
+ gb2 = df2.groupby("date").aggregate("sum")
84
+
85
+ assert len(gb1) == len(gb2)
86
+
87
+
88
+ def test_agg_period_index():
89
+ prng = period_range("2012-1-1", freq="M", periods=3)
90
+ df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)), index=prng)
91
+ rs = df.groupby(level=0).sum()
92
+ assert isinstance(rs.index, PeriodIndex)
93
+
94
+ # GH 3579
95
+ index = period_range(start="1999-01", periods=5, freq="M")
96
+ s1 = Series(np.random.default_rng(2).random(len(index)), index=index)
97
+ s2 = Series(np.random.default_rng(2).random(len(index)), index=index)
98
+ df = DataFrame.from_dict({"s1": s1, "s2": s2})
99
+ grouped = df.groupby(df.index.month)
100
+ list(grouped)
101
+
102
+
103
+ def test_agg_dict_parameter_cast_result_dtypes():
104
+ # GH 12821
105
+
106
+ df = DataFrame(
107
+ {
108
+ "class": ["A", "A", "B", "B", "C", "C", "D", "D"],
109
+ "time": date_range("1/1/2011", periods=8, freq="h"),
110
+ }
111
+ )
112
+ df.loc[[0, 1, 2, 5], "time"] = None
113
+
114
+ # test for `first` function
115
+ exp = df.loc[[0, 3, 4, 6]].set_index("class")
116
+ grouped = df.groupby("class")
117
+ tm.assert_frame_equal(grouped.first(), exp)
118
+ tm.assert_frame_equal(grouped.agg("first"), exp)
119
+ tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)
120
+ tm.assert_series_equal(grouped.time.first(), exp["time"])
121
+ tm.assert_series_equal(grouped.time.agg("first"), exp["time"])
122
+
123
+ # test for `last` function
124
+ exp = df.loc[[0, 3, 4, 7]].set_index("class")
125
+ grouped = df.groupby("class")
126
+ tm.assert_frame_equal(grouped.last(), exp)
127
+ tm.assert_frame_equal(grouped.agg("last"), exp)
128
+ tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)
129
+ tm.assert_series_equal(grouped.time.last(), exp["time"])
130
+ tm.assert_series_equal(grouped.time.agg("last"), exp["time"])
131
+
132
+ # count
133
+ exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
134
+ tm.assert_series_equal(grouped.time.agg(len), exp)
135
+ tm.assert_series_equal(grouped.time.size(), exp)
136
+
137
+ exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
138
+ tm.assert_series_equal(grouped.time.count(), exp)
139
+
140
+
141
+ def test_agg_cast_results_dtypes():
142
+ # similar to GH12821
143
+ # xref #11444
144
+ u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
145
+ v = list("aaabbbbbbccd")
146
+ df = DataFrame({"X": v, "Y": u})
147
+
148
+ result = df.groupby("X")["Y"].agg(len)
149
+ expected = df.groupby("X")["Y"].count()
150
+ tm.assert_series_equal(result, expected)
151
+
152
+
153
+ def test_aggregate_float64_no_int64():
154
+ # see gh-11199
155
+ df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})
156
+
157
+ expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
158
+ expected.index.name = "b"
159
+
160
+ result = df.groupby("b")[["a"]].mean()
161
+ tm.assert_frame_equal(result, expected)
162
+
163
+ expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
164
+ expected.index.name = "b"
165
+
166
+ result = df.groupby("b")[["a", "c"]].mean()
167
+ tm.assert_frame_equal(result, expected)
168
+
169
+
170
+ def test_aggregate_api_consistency():
171
+ # GH 9052
172
+ # make sure that the aggregates via dict
173
+ # are consistent
174
+ df = DataFrame(
175
+ {
176
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
177
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
178
+ "C": np.random.default_rng(2).standard_normal(8) + 1.0,
179
+ "D": np.arange(8),
180
+ }
181
+ )
182
+
183
+ grouped = df.groupby(["A", "B"])
184
+ c_mean = grouped["C"].mean()
185
+ c_sum = grouped["C"].sum()
186
+ d_mean = grouped["D"].mean()
187
+ d_sum = grouped["D"].sum()
188
+
189
+ result = grouped["D"].agg(["sum", "mean"])
190
+ expected = pd.concat([d_sum, d_mean], axis=1)
191
+ expected.columns = ["sum", "mean"]
192
+ tm.assert_frame_equal(result, expected, check_like=True)
193
+
194
+ result = grouped.agg(["sum", "mean"])
195
+ expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
196
+ expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])
197
+ tm.assert_frame_equal(result, expected, check_like=True)
198
+
199
+ result = grouped[["D", "C"]].agg(["sum", "mean"])
200
+ expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
201
+ expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])
202
+ tm.assert_frame_equal(result, expected, check_like=True)
203
+
204
+ result = grouped.agg({"C": "mean", "D": "sum"})
205
+ expected = pd.concat([d_sum, c_mean], axis=1)
206
+ tm.assert_frame_equal(result, expected, check_like=True)
207
+
208
+ result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})
209
+ expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
210
+ expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])
211
+
212
+ msg = r"Column\(s\) \['r', 'r2'\] do not exist"
213
+ with pytest.raises(KeyError, match=msg):
214
+ grouped[["D", "C"]].agg({"r": "sum", "r2": "mean"})
215
+
216
+
217
+ def test_agg_dict_renaming_deprecation():
218
+ # 15931
219
+ df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})
220
+
221
+ msg = r"nested renamer is not supported"
222
+ with pytest.raises(SpecificationError, match=msg):
223
+ df.groupby("A").agg(
224
+ {"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}
225
+ )
226
+
227
+ msg = r"Column\(s\) \['ma'\] do not exist"
228
+ with pytest.raises(KeyError, match=msg):
229
+ df.groupby("A")[["B", "C"]].agg({"ma": "max"})
230
+
231
+ msg = r"nested renamer is not supported"
232
+ with pytest.raises(SpecificationError, match=msg):
233
+ df.groupby("A").B.agg({"foo": "count"})
234
+
235
+
236
+ def test_agg_compat():
237
+ # GH 12334
238
+ df = DataFrame(
239
+ {
240
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
241
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
242
+ "C": np.random.default_rng(2).standard_normal(8) + 1.0,
243
+ "D": np.arange(8),
244
+ }
245
+ )
246
+
247
+ g = df.groupby(["A", "B"])
248
+
249
+ msg = r"nested renamer is not supported"
250
+ with pytest.raises(SpecificationError, match=msg):
251
+ g["D"].agg({"C": ["sum", "std"]})
252
+
253
+ with pytest.raises(SpecificationError, match=msg):
254
+ g["D"].agg({"C": "sum", "D": "std"})
255
+
256
+
257
+ def test_agg_nested_dicts():
258
+ # API change for disallowing these types of nested dicts
259
+ df = DataFrame(
260
+ {
261
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
262
+ "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
263
+ "C": np.random.default_rng(2).standard_normal(8) + 1.0,
264
+ "D": np.arange(8),
265
+ }
266
+ )
267
+
268
+ g = df.groupby(["A", "B"])
269
+
270
+ msg = r"nested renamer is not supported"
271
+ with pytest.raises(SpecificationError, match=msg):
272
+ g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})
273
+
274
+ with pytest.raises(SpecificationError, match=msg):
275
+ g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}})
276
+
277
+ # same name as the original column
278
+ # GH9052
279
+ with pytest.raises(SpecificationError, match=msg):
280
+ g["D"].agg({"result1": np.sum, "result2": np.mean})
281
+
282
+ with pytest.raises(SpecificationError, match=msg):
283
+ g["D"].agg({"D": np.sum, "result2": np.mean})
284
+
285
+
286
+ def test_agg_item_by_item_raise_typeerror():
287
+ df = DataFrame(np.random.default_rng(2).integers(10, size=(20, 10)))
288
+
289
+ def raiseException(df):
290
+ pprint_thing("----------------------------------------")
291
+ pprint_thing(df.to_string())
292
+ raise TypeError("test")
293
+
294
+ with pytest.raises(TypeError, match="test"):
295
+ df.groupby(0).agg(raiseException)
296
+
297
+
298
+ def test_series_agg_multikey():
299
+ ts = Series(
300
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
301
+ )
302
+ grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
303
+
304
+ result = grouped.agg("sum")
305
+ expected = grouped.sum()
306
+ tm.assert_series_equal(result, expected)
307
+
308
+
309
+ def test_series_agg_multi_pure_python():
310
+ data = DataFrame(
311
+ {
312
+ "A": [
313
+ "foo",
314
+ "foo",
315
+ "foo",
316
+ "foo",
317
+ "bar",
318
+ "bar",
319
+ "bar",
320
+ "bar",
321
+ "foo",
322
+ "foo",
323
+ "foo",
324
+ ],
325
+ "B": [
326
+ "one",
327
+ "one",
328
+ "one",
329
+ "two",
330
+ "one",
331
+ "one",
332
+ "one",
333
+ "two",
334
+ "two",
335
+ "two",
336
+ "one",
337
+ ],
338
+ "C": [
339
+ "dull",
340
+ "dull",
341
+ "shiny",
342
+ "dull",
343
+ "dull",
344
+ "shiny",
345
+ "shiny",
346
+ "dull",
347
+ "shiny",
348
+ "shiny",
349
+ "shiny",
350
+ ],
351
+ "D": np.random.default_rng(2).standard_normal(11),
352
+ "E": np.random.default_rng(2).standard_normal(11),
353
+ "F": np.random.default_rng(2).standard_normal(11),
354
+ }
355
+ )
356
+
357
+ def bad(x):
358
+ assert len(x.values.base) > 0
359
+ return "foo"
360
+
361
+ result = data.groupby(["A", "B"]).agg(bad)
362
+ expected = data.groupby(["A", "B"]).agg(lambda x: "foo")
363
+ tm.assert_frame_equal(result, expected)
364
+
365
+
366
+ def test_agg_consistency():
367
+ # agg with ([]) and () not consistent
368
+ # GH 6715
369
+ def P1(a):
370
+ return np.percentile(a.dropna(), q=1)
371
+
372
+ df = DataFrame(
373
+ {
374
+ "col1": [1, 2, 3, 4],
375
+ "col2": [10, 25, 26, 31],
376
+ "date": [
377
+ dt.date(2013, 2, 10),
378
+ dt.date(2013, 2, 10),
379
+ dt.date(2013, 2, 11),
380
+ dt.date(2013, 2, 11),
381
+ ],
382
+ }
383
+ )
384
+
385
+ g = df.groupby("date")
386
+
387
+ expected = g.agg([P1])
388
+ expected.columns = expected.columns.levels[0]
389
+
390
+ result = g.agg(P1)
391
+ tm.assert_frame_equal(result, expected)
392
+
393
+
394
+ def test_agg_callables():
395
+ # GH 7929
396
+ df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)
397
+
398
+ class fn_class:
399
+ def __call__(self, x):
400
+ return sum(x)
401
+
402
+ equiv_callables = [
403
+ sum,
404
+ np.sum,
405
+ lambda x: sum(x),
406
+ lambda x: x.sum(),
407
+ partial(sum),
408
+ fn_class(),
409
+ ]
410
+
411
+ expected = df.groupby("foo").agg("sum")
412
+ for ecall in equiv_callables:
413
+ warn = FutureWarning if ecall is sum or ecall is np.sum else None
414
+ msg = "using DataFrameGroupBy.sum"
415
+ with tm.assert_produces_warning(warn, match=msg):
416
+ result = df.groupby("foo").agg(ecall)
417
+ tm.assert_frame_equal(result, expected)
418
+
419
+
420
+ def test_agg_over_numpy_arrays():
421
+ # GH 3788
422
+ df = DataFrame(
423
+ [
424
+ [1, np.array([10, 20, 30])],
425
+ [1, np.array([40, 50, 60])],
426
+ [2, np.array([20, 30, 40])],
427
+ ],
428
+ columns=["category", "arraydata"],
429
+ )
430
+ gb = df.groupby("category")
431
+
432
+ expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
433
+ expected_index = Index([1, 2], name="category")
434
+ expected_column = ["arraydata"]
435
+ expected = DataFrame(expected_data, index=expected_index, columns=expected_column)
436
+
437
+ alt = gb.sum(numeric_only=False)
438
+ tm.assert_frame_equal(alt, expected)
439
+
440
+ result = gb.agg("sum", numeric_only=False)
441
+ tm.assert_frame_equal(result, expected)
442
+
443
+ # FIXME: the original version of this test called `gb.agg(sum)`
444
+ # and that raises TypeError if `numeric_only=False` is passed
445
+
446
+
447
+ @pytest.mark.parametrize("as_period", [True, False])
448
+ def test_agg_tzaware_non_datetime_result(as_period):
449
+ # discussed in GH#29589, fixed in GH#29641, operating on tzaware values
450
+ # with function that is not dtype-preserving
451
+ dti = date_range("2012-01-01", periods=4, tz="UTC")
452
+ if as_period:
453
+ dti = dti.tz_localize(None).to_period("D")
454
+
455
+ df = DataFrame({"a": [0, 0, 1, 1], "b": dti})
456
+ gb = df.groupby("a")
457
+
458
+ # Case that _does_ preserve the dtype
459
+ result = gb["b"].agg(lambda x: x.iloc[0])
460
+ expected = Series(dti[::2], name="b")
461
+ expected.index.name = "a"
462
+ tm.assert_series_equal(result, expected)
463
+
464
+ # Cases that do _not_ preserve the dtype
465
+ result = gb["b"].agg(lambda x: x.iloc[0].year)
466
+ expected = Series([2012, 2012], name="b")
467
+ expected.index.name = "a"
468
+ tm.assert_series_equal(result, expected)
469
+
470
+ result = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0])
471
+ expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b")
472
+ expected.index.name = "a"
473
+ if as_period:
474
+ expected = Series([pd.offsets.Day(1), pd.offsets.Day(1)], name="b")
475
+ expected.index.name = "a"
476
+ tm.assert_series_equal(result, expected)
477
+
478
+
479
+ def test_agg_timezone_round_trip():
480
+ # GH 15426
481
+ ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
482
+ df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]})
483
+
484
+ result1 = df.groupby("a")["b"].agg("min").iloc[0]
485
+ result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]
486
+ result3 = df.groupby("a")["b"].min().iloc[0]
487
+
488
+ assert result1 == ts
489
+ assert result2 == ts
490
+ assert result3 == ts
491
+
492
+ dates = [
493
+ pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5)
494
+ ]
495
+ df = DataFrame({"A": ["a", "b"] * 2, "B": dates})
496
+ grouped = df.groupby("A")
497
+
498
+ ts = df["B"].iloc[0]
499
+ assert ts == grouped.nth(0)["B"].iloc[0]
500
+ assert ts == grouped.head(1)["B"].iloc[0]
501
+ assert ts == grouped.first()["B"].iloc[0]
502
+
503
+ # GH#27110 applying iloc should return a DataFrame
504
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
505
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
506
+ assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1]
507
+
508
+ ts = df["B"].iloc[2]
509
+ assert ts == grouped.last()["B"].iloc[0]
510
+
511
+ # GH#27110 applying iloc should return a DataFrame
512
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
513
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
514
+ assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1]
515
+
516
+
517
+ def test_sum_uint64_overflow():
518
+ # see gh-14758
519
+ # Convert to uint64 and don't overflow
520
+ df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
521
+ df = df + 9223372036854775807
522
+
523
+ index = Index(
524
+ [9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
525
+ )
526
+ expected = DataFrame(
527
+ {1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
528
+ index=index,
529
+ dtype=object,
530
+ )
531
+
532
+ expected.index.name = 0
533
+ result = df.groupby(0).sum(numeric_only=False)
534
+ tm.assert_frame_equal(result, expected)
535
+
536
+ # out column is non-numeric, so with numeric_only=True it is dropped
537
+ result2 = df.groupby(0).sum(numeric_only=True)
538
+ expected2 = expected[[]]
539
+ tm.assert_frame_equal(result2, expected2)
540
+
541
+
542
+ @pytest.mark.parametrize(
543
+ "structure, expected",
544
+ [
545
+ (tuple, DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
546
+ (list, DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
547
+ (
548
+ lambda x: tuple(x),
549
+ DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),
550
+ ),
551
+ (
552
+ lambda x: list(x),
553
+ DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),
554
+ ),
555
+ ],
556
+ )
557
+ def test_agg_structs_dataframe(structure, expected):
558
+ df = DataFrame(
559
+ {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
560
+ )
561
+
562
+ result = df.groupby(["A", "B"]).aggregate(structure)
563
+ expected.index.names = ["A", "B"]
564
+ tm.assert_frame_equal(result, expected)
565
+
566
+
567
+ @pytest.mark.parametrize(
568
+ "structure, expected",
569
+ [
570
+ (tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
571
+ (list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
572
+ (lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
573
+ (lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
574
+ ],
575
+ )
576
+ def test_agg_structs_series(structure, expected):
577
+ # Issue #18079
578
+ df = DataFrame(
579
+ {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
580
+ )
581
+
582
+ result = df.groupby("A")["C"].aggregate(structure)
583
+ expected.index.name = "A"
584
+ tm.assert_series_equal(result, expected)
585
+
586
+
587
+ def test_agg_category_nansum(observed):
588
+ categories = ["a", "b", "c"]
589
+ df = DataFrame(
590
+ {"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
591
+ )
592
+ msg = "using SeriesGroupBy.sum"
593
+ with tm.assert_produces_warning(FutureWarning, match=msg):
594
+ result = df.groupby("A", observed=observed).B.agg(np.nansum)
595
+ expected = Series(
596
+ [3, 3, 0],
597
+ index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
598
+ name="B",
599
+ )
600
+ if observed:
601
+ expected = expected[expected != 0]
602
+ tm.assert_series_equal(result, expected)
603
+
604
+
605
+ def test_agg_list_like_func():
606
+ # GH 18473
607
+ df = DataFrame({"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]})
608
+ grouped = df.groupby("A", as_index=False, sort=False)
609
+ result = grouped.agg({"B": lambda x: list(x)})
610
+ expected = DataFrame(
611
+ {"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]}
612
+ )
613
+ tm.assert_frame_equal(result, expected)
614
+
615
+
616
+ def test_agg_lambda_with_timezone():
617
+ # GH 23683
618
+ df = DataFrame(
619
+ {
620
+ "tag": [1, 1],
621
+ "date": [
622
+ pd.Timestamp("2018-01-01", tz="UTC"),
623
+ pd.Timestamp("2018-01-02", tz="UTC"),
624
+ ],
625
+ }
626
+ )
627
+ result = df.groupby("tag").agg({"date": lambda e: e.head(1)})
628
+ expected = DataFrame(
629
+ [pd.Timestamp("2018-01-01", tz="UTC")],
630
+ index=Index([1], name="tag"),
631
+ columns=["date"],
632
+ )
633
+ tm.assert_frame_equal(result, expected)
634
+
635
+
636
+ @pytest.mark.parametrize(
637
+ "err_cls",
638
+ [
639
+ NotImplementedError,
640
+ RuntimeError,
641
+ KeyError,
642
+ IndexError,
643
+ OSError,
644
+ ValueError,
645
+ ArithmeticError,
646
+ AttributeError,
647
+ ],
648
+ )
649
+ def test_groupby_agg_err_catching(err_cls):
650
+ # make sure we suppress anything other than TypeError or AssertionError
651
+ # in _python_agg_general
652
+
653
+ # Use a non-standard EA to make sure we don't go down ndarray paths
654
+ from pandas.tests.extension.decimal.array import (
655
+ DecimalArray,
656
+ make_data,
657
+ to_decimal,
658
+ )
659
+
660
+ data = make_data()[:5]
661
+ df = DataFrame(
662
+ {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
663
+ )
664
+
665
+ expected = Series(to_decimal([data[0], data[3]]))
666
+
667
+ def weird_func(x):
668
+ # weird function that raise something other than TypeError or IndexError
669
+ # in _python_agg_general
670
+ if len(x) == 0:
671
+ raise err_cls
672
+ return x.iloc[0]
673
+
674
+ result = df["decimals"].groupby(df["id1"]).agg(weird_func)
675
+ tm.assert_series_equal(result, expected, check_names=False)
venv/lib/python3.10/site-packages/pandas/tests/groupby/conftest.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ DataFrame,
6
+ Index,
7
+ Series,
8
+ date_range,
9
+ )
10
+ from pandas.core.groupby.base import (
11
+ reduction_kernels,
12
+ transformation_kernels,
13
+ )
14
+
15
+
16
+ @pytest.fixture(params=[True, False])
17
+ def sort(request):
18
+ return request.param
19
+
20
+
21
+ @pytest.fixture(params=[True, False])
22
+ def as_index(request):
23
+ return request.param
24
+
25
+
26
+ @pytest.fixture(params=[True, False])
27
+ def dropna(request):
28
+ return request.param
29
+
30
+
31
+ @pytest.fixture(params=[True, False])
32
+ def observed(request):
33
+ return request.param
34
+
35
+
36
+ @pytest.fixture
37
+ def df():
38
+ return DataFrame(
39
+ {
40
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
41
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
42
+ "C": np.random.default_rng(2).standard_normal(8),
43
+ "D": np.random.default_rng(2).standard_normal(8),
44
+ }
45
+ )
46
+
47
+
48
+ @pytest.fixture
49
+ def ts():
50
+ return Series(
51
+ np.random.default_rng(2).standard_normal(30),
52
+ index=date_range("2000-01-01", periods=30, freq="B"),
53
+ )
54
+
55
+
56
+ @pytest.fixture
57
+ def tsframe():
58
+ return DataFrame(
59
+ np.random.default_rng(2).standard_normal((30, 4)),
60
+ columns=Index(list("ABCD"), dtype=object),
61
+ index=date_range("2000-01-01", periods=30, freq="B"),
62
+ )
63
+
64
+
65
+ @pytest.fixture
66
+ def three_group():
67
+ return DataFrame(
68
+ {
69
+ "A": [
70
+ "foo",
71
+ "foo",
72
+ "foo",
73
+ "foo",
74
+ "bar",
75
+ "bar",
76
+ "bar",
77
+ "bar",
78
+ "foo",
79
+ "foo",
80
+ "foo",
81
+ ],
82
+ "B": [
83
+ "one",
84
+ "one",
85
+ "one",
86
+ "two",
87
+ "one",
88
+ "one",
89
+ "one",
90
+ "two",
91
+ "two",
92
+ "two",
93
+ "one",
94
+ ],
95
+ "C": [
96
+ "dull",
97
+ "dull",
98
+ "shiny",
99
+ "dull",
100
+ "dull",
101
+ "shiny",
102
+ "shiny",
103
+ "dull",
104
+ "shiny",
105
+ "shiny",
106
+ "shiny",
107
+ ],
108
+ "D": np.random.default_rng(2).standard_normal(11),
109
+ "E": np.random.default_rng(2).standard_normal(11),
110
+ "F": np.random.default_rng(2).standard_normal(11),
111
+ }
112
+ )
113
+
114
+
115
+ @pytest.fixture()
116
+ def slice_test_df():
117
+ data = [
118
+ [0, "a", "a0_at_0"],
119
+ [1, "b", "b0_at_1"],
120
+ [2, "a", "a1_at_2"],
121
+ [3, "b", "b1_at_3"],
122
+ [4, "c", "c0_at_4"],
123
+ [5, "a", "a2_at_5"],
124
+ [6, "a", "a3_at_6"],
125
+ [7, "a", "a4_at_7"],
126
+ ]
127
+ df = DataFrame(data, columns=["Index", "Group", "Value"])
128
+ return df.set_index("Index")
129
+
130
+
131
+ @pytest.fixture()
132
+ def slice_test_grouped(slice_test_df):
133
+ return slice_test_df.groupby("Group", as_index=False)
134
+
135
+
136
+ @pytest.fixture(params=sorted(reduction_kernels))
137
+ def reduction_func(request):
138
+ """
139
+ yields the string names of all groupby reduction functions, one at a time.
140
+ """
141
+ return request.param
142
+
143
+
144
+ @pytest.fixture(params=sorted(transformation_kernels))
145
+ def transformation_func(request):
146
+ """yields the string names of all groupby transformation functions."""
147
+ return request.param
148
+
149
+
150
+ @pytest.fixture(params=sorted(reduction_kernels) + sorted(transformation_kernels))
151
+ def groupby_func(request):
152
+ """yields both aggregation and transformation functions."""
153
+ return request.param
154
+
155
+
156
+ @pytest.fixture(params=[True, False])
157
+ def parallel(request):
158
+ """parallel keyword argument for numba.jit"""
159
+ return request.param
160
+
161
+
162
+ # Can parameterize nogil & nopython over True | False, but limiting per
163
+ # https://github.com/pandas-dev/pandas/pull/41971#issuecomment-860607472
164
+
165
+
166
+ @pytest.fixture(params=[False])
167
+ def nogil(request):
168
+ """nogil keyword argument for numba.jit"""
169
+ return request.param
170
+
171
+
172
+ @pytest.fixture(params=[True])
173
+ def nopython(request):
174
+ """nopython keyword argument for numba.jit"""
175
+ return request.param
176
+
177
+
178
+ @pytest.fixture(
179
+ params=[
180
+ ("mean", {}),
181
+ ("var", {"ddof": 1}),
182
+ ("var", {"ddof": 0}),
183
+ ("std", {"ddof": 1}),
184
+ ("std", {"ddof": 0}),
185
+ ("sum", {}),
186
+ ("min", {}),
187
+ ("max", {}),
188
+ ("sum", {"min_count": 2}),
189
+ ("min", {"min_count": 2}),
190
+ ("max", {"min_count": 2}),
191
+ ],
192
+ ids=[
193
+ "mean",
194
+ "var_1",
195
+ "var_0",
196
+ "std_1",
197
+ "std_0",
198
+ "sum",
199
+ "min",
200
+ "max",
201
+ "sum-min_count",
202
+ "min-min_count",
203
+ "max-min_count",
204
+ ],
205
+ )
206
+ def numba_supported_reductions(request):
207
+ """reductions supported with engine='numba'"""
208
+ return request.param
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_all_methods.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests that apply to all groupby operation methods.
3
+
4
+ The only tests that should appear here are those that use the `groupby_func` fixture.
5
+ Even if it does use that fixture, prefer a more specific test file if it available
6
+ such as:
7
+
8
+ - test_categorical
9
+ - test_groupby_dropna
10
+ - test_groupby_subclass
11
+ - test_raises
12
+ """
13
+
14
+ import pytest
15
+
16
+ import pandas as pd
17
+ from pandas import DataFrame
18
+ import pandas._testing as tm
19
+ from pandas.tests.groupby import get_groupby_method_args
20
+
21
+
22
+ def test_multiindex_group_all_columns_when_empty(groupby_func):
23
+ # GH 32464
24
+ df = DataFrame({"a": [], "b": [], "c": []}).set_index(["a", "b", "c"])
25
+ gb = df.groupby(["a", "b", "c"], group_keys=False)
26
+ method = getattr(gb, groupby_func)
27
+ args = get_groupby_method_args(groupby_func, df)
28
+
29
+ warn = FutureWarning if groupby_func == "fillna" else None
30
+ warn_msg = "DataFrameGroupBy.fillna is deprecated"
31
+ with tm.assert_produces_warning(warn, match=warn_msg):
32
+ result = method(*args).index
33
+ expected = df.index
34
+ tm.assert_index_equal(result, expected)
35
+
36
+
37
+ def test_duplicate_columns(request, groupby_func, as_index):
38
+ # GH#50806
39
+ if groupby_func == "corrwith":
40
+ msg = "GH#50845 - corrwith fails when there are duplicate columns"
41
+ request.applymarker(pytest.mark.xfail(reason=msg))
42
+ df = DataFrame([[1, 3, 6], [1, 4, 7], [2, 5, 8]], columns=list("abb"))
43
+ args = get_groupby_method_args(groupby_func, df)
44
+ gb = df.groupby("a", as_index=as_index)
45
+ warn = FutureWarning if groupby_func == "fillna" else None
46
+ warn_msg = "DataFrameGroupBy.fillna is deprecated"
47
+ with tm.assert_produces_warning(warn, match=warn_msg):
48
+ result = getattr(gb, groupby_func)(*args)
49
+
50
+ expected_df = df.set_axis(["a", "b", "c"], axis=1)
51
+ expected_args = get_groupby_method_args(groupby_func, expected_df)
52
+ expected_gb = expected_df.groupby("a", as_index=as_index)
53
+ warn = FutureWarning if groupby_func == "fillna" else None
54
+ warn_msg = "DataFrameGroupBy.fillna is deprecated"
55
+ with tm.assert_produces_warning(warn, match=warn_msg):
56
+ expected = getattr(expected_gb, groupby_func)(*expected_args)
57
+ if groupby_func not in ("size", "ngroup", "cumcount"):
58
+ expected = expected.rename(columns={"c": "b"})
59
+ tm.assert_equal(result, expected)
60
+
61
+
62
+ @pytest.mark.parametrize(
63
+ "idx",
64
+ [
65
+ pd.Index(["a", "a"], name="foo"),
66
+ pd.MultiIndex.from_tuples((("a", "a"), ("a", "a")), names=["foo", "bar"]),
67
+ ],
68
+ )
69
+ def test_dup_labels_output_shape(groupby_func, idx):
70
+ if groupby_func in {"size", "ngroup", "cumcount"}:
71
+ pytest.skip(f"Not applicable for {groupby_func}")
72
+
73
+ df = DataFrame([[1, 1]], columns=idx)
74
+ grp_by = df.groupby([0])
75
+
76
+ args = get_groupby_method_args(groupby_func, df)
77
+ warn = FutureWarning if groupby_func == "fillna" else None
78
+ warn_msg = "DataFrameGroupBy.fillna is deprecated"
79
+ with tm.assert_produces_warning(warn, match=warn_msg):
80
+ result = getattr(grp_by, groupby_func)(*args)
81
+
82
+ assert result.shape == (1, 2)
83
+ tm.assert_index_equal(result.columns, idx)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_api.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests of the groupby API, including internal consistency and with other pandas objects.
3
+
4
+ Tests in this file should only check the existence, names, and arguments of groupby
5
+ methods. It should not test the results of any groupby operation.
6
+ """
7
+
8
+ import inspect
9
+
10
+ import pytest
11
+
12
+ from pandas import (
13
+ DataFrame,
14
+ Series,
15
+ )
16
+ from pandas.core.groupby.base import (
17
+ groupby_other_methods,
18
+ reduction_kernels,
19
+ transformation_kernels,
20
+ )
21
+ from pandas.core.groupby.generic import (
22
+ DataFrameGroupBy,
23
+ SeriesGroupBy,
24
+ )
25
+
26
+
27
+ def test_tab_completion(multiindex_dataframe_random_data):
28
+ grp = multiindex_dataframe_random_data.groupby(level="second")
29
+ results = {v for v in dir(grp) if not v.startswith("_")}
30
+ expected = {
31
+ "A",
32
+ "B",
33
+ "C",
34
+ "agg",
35
+ "aggregate",
36
+ "apply",
37
+ "boxplot",
38
+ "filter",
39
+ "first",
40
+ "get_group",
41
+ "groups",
42
+ "hist",
43
+ "indices",
44
+ "last",
45
+ "max",
46
+ "mean",
47
+ "median",
48
+ "min",
49
+ "ngroups",
50
+ "nth",
51
+ "ohlc",
52
+ "plot",
53
+ "prod",
54
+ "size",
55
+ "std",
56
+ "sum",
57
+ "transform",
58
+ "var",
59
+ "sem",
60
+ "count",
61
+ "nunique",
62
+ "head",
63
+ "describe",
64
+ "cummax",
65
+ "quantile",
66
+ "rank",
67
+ "cumprod",
68
+ "tail",
69
+ "resample",
70
+ "cummin",
71
+ "fillna",
72
+ "cumsum",
73
+ "cumcount",
74
+ "ngroup",
75
+ "all",
76
+ "shift",
77
+ "skew",
78
+ "take",
79
+ "pct_change",
80
+ "any",
81
+ "corr",
82
+ "corrwith",
83
+ "cov",
84
+ "dtypes",
85
+ "ndim",
86
+ "diff",
87
+ "idxmax",
88
+ "idxmin",
89
+ "ffill",
90
+ "bfill",
91
+ "rolling",
92
+ "expanding",
93
+ "pipe",
94
+ "sample",
95
+ "ewm",
96
+ "value_counts",
97
+ }
98
+ assert results == expected
99
+
100
+
101
+ def test_all_methods_categorized(multiindex_dataframe_random_data):
102
+ grp = multiindex_dataframe_random_data.groupby(
103
+ multiindex_dataframe_random_data.iloc[:, 0]
104
+ )
105
+ names = {_ for _ in dir(grp) if not _.startswith("_")} - set(
106
+ multiindex_dataframe_random_data.columns
107
+ )
108
+ new_names = set(names)
109
+ new_names -= reduction_kernels
110
+ new_names -= transformation_kernels
111
+ new_names -= groupby_other_methods
112
+
113
+ assert not reduction_kernels & transformation_kernels
114
+ assert not reduction_kernels & groupby_other_methods
115
+ assert not transformation_kernels & groupby_other_methods
116
+
117
+ # new public method?
118
+ if new_names:
119
+ msg = f"""
120
+ There are uncategorized methods defined on the Grouper class:
121
+ {new_names}.
122
+
123
+ Was a new method recently added?
124
+
125
+ Every public method On Grouper must appear in exactly one the
126
+ following three lists defined in pandas.core.groupby.base:
127
+ - `reduction_kernels`
128
+ - `transformation_kernels`
129
+ - `groupby_other_methods`
130
+ see the comments in pandas/core/groupby/base.py for guidance on
131
+ how to fix this test.
132
+ """
133
+ raise AssertionError(msg)
134
+
135
+ # removed a public method?
136
+ all_categorized = reduction_kernels | transformation_kernels | groupby_other_methods
137
+ if names != all_categorized:
138
+ msg = f"""
139
+ Some methods which are supposed to be on the Grouper class
140
+ are missing:
141
+ {all_categorized - names}.
142
+
143
+ They're still defined in one of the lists that live in pandas/core/groupby/base.py.
144
+ If you removed a method, you should update them
145
+ """
146
+ raise AssertionError(msg)
147
+
148
+
149
def test_frame_consistency(groupby_func):
    """Check that each DataFrameGroupBy method shares its signature with the
    corresponding DataFrame method, modulo a curated list of known
    (sometimes intentional) differences per operation.
    """
    # GH#48028
    if groupby_func in ("first", "last"):
        msg = "first and last are entirely different between frame and groupby"
        pytest.skip(reason=msg)

    if groupby_func in ("cumcount", "ngroup"):
        # These exist only on GroupBy objects, by design.
        assert not hasattr(DataFrame, groupby_func)
        return

    frame_method = getattr(DataFrame, groupby_func)
    gb_method = getattr(DataFrameGroupBy, groupby_func)
    # Compare parameter-name sets, not annotations or defaults.
    result = set(inspect.signature(gb_method).parameters)
    if groupby_func == "size":
        # "size" is a method on GroupBy but property on DataFrame:
        expected = {"self"}
    else:
        expected = set(inspect.signature(frame_method).parameters)

    # Exclude certain arguments from result and expected depending on the operation
    # Some of these may be purposeful inconsistencies between the APIs
    exclude_expected, exclude_result = set(), set()
    if groupby_func in ("any", "all"):
        exclude_expected = {"kwargs", "bool_only", "axis"}
    elif groupby_func in ("count",):
        exclude_expected = {"numeric_only", "axis"}
    elif groupby_func in ("nunique",):
        exclude_expected = {"axis"}
    elif groupby_func in ("max", "min"):
        exclude_expected = {"axis", "kwargs", "skipna"}
        exclude_result = {"min_count", "engine", "engine_kwargs"}
    elif groupby_func in ("mean", "std", "sum", "var"):
        exclude_expected = {"axis", "kwargs", "skipna"}
        exclude_result = {"engine", "engine_kwargs"}
    elif groupby_func in ("median", "prod", "sem"):
        exclude_expected = {"axis", "kwargs", "skipna"}
    elif groupby_func in ("backfill", "bfill", "ffill", "pad"):
        exclude_expected = {"downcast", "inplace", "axis", "limit_area"}
    elif groupby_func in ("cummax", "cummin"):
        exclude_expected = {"skipna", "args"}
        exclude_result = {"numeric_only"}
    elif groupby_func in ("cumprod", "cumsum"):
        exclude_expected = {"skipna"}
    elif groupby_func in ("pct_change",):
        exclude_expected = {"kwargs"}
        exclude_result = {"axis"}
    elif groupby_func in ("rank",):
        exclude_expected = {"numeric_only"}
    elif groupby_func in ("quantile",):
        exclude_expected = {"method", "axis"}

    # Ensure excluded arguments are actually in the signatures
    assert result & exclude_result == exclude_result
    assert expected & exclude_expected == exclude_expected

    result -= exclude_result
    expected -= exclude_expected
    assert result == expected
209
def test_series_consistency(request, groupby_func):
    """Check that each SeriesGroupBy method shares its signature with the
    corresponding Series method, modulo a curated list of known
    (sometimes intentional) differences per operation.

    NOTE(review): the ``request`` fixture parameter is unused in the body —
    presumably left over from an earlier xfail; confirm before removing.
    """
    # GH#48028
    if groupby_func in ("first", "last"):
        pytest.skip("first and last are entirely different between Series and groupby")

    if groupby_func in ("cumcount", "corrwith", "ngroup"):
        # These exist only on GroupBy objects, by design.
        assert not hasattr(Series, groupby_func)
        return

    series_method = getattr(Series, groupby_func)
    gb_method = getattr(SeriesGroupBy, groupby_func)
    # Compare parameter-name sets, not annotations or defaults.
    result = set(inspect.signature(gb_method).parameters)
    if groupby_func == "size":
        # "size" is a method on GroupBy but property on Series
        expected = {"self"}
    else:
        expected = set(inspect.signature(series_method).parameters)

    # Exclude certain arguments from result and expected depending on the operation
    # Some of these may be purposeful inconsistencies between the APIs
    exclude_expected, exclude_result = set(), set()
    if groupby_func in ("any", "all"):
        exclude_expected = {"kwargs", "bool_only", "axis"}
    elif groupby_func in ("diff",):
        exclude_result = {"axis"}
    elif groupby_func in ("max", "min"):
        exclude_expected = {"axis", "kwargs", "skipna"}
        exclude_result = {"min_count", "engine", "engine_kwargs"}
    elif groupby_func in ("mean", "std", "sum", "var"):
        exclude_expected = {"axis", "kwargs", "skipna"}
        exclude_result = {"engine", "engine_kwargs"}
    elif groupby_func in ("median", "prod", "sem"):
        exclude_expected = {"axis", "kwargs", "skipna"}
    elif groupby_func in ("backfill", "bfill", "ffill", "pad"):
        exclude_expected = {"downcast", "inplace", "axis", "limit_area"}
    elif groupby_func in ("cummax", "cummin"):
        exclude_expected = {"skipna", "args"}
        exclude_result = {"numeric_only"}
    elif groupby_func in ("cumprod", "cumsum"):
        exclude_expected = {"skipna"}
    elif groupby_func in ("pct_change",):
        exclude_expected = {"kwargs"}
        exclude_result = {"axis"}
    elif groupby_func in ("rank",):
        exclude_expected = {"numeric_only"}
    elif groupby_func in ("idxmin", "idxmax"):
        exclude_expected = {"args", "kwargs"}
    elif groupby_func in ("quantile",):
        exclude_result = {"numeric_only"}

    # Ensure excluded arguments are actually in the signatures
    assert result & exclude_result == exclude_result
    assert expected & exclude_expected == exclude_expected

    result -= exclude_result
    expected -= exclude_expected
    assert result == expected
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_apply.py ADDED
@@ -0,0 +1,1606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ date,
3
+ datetime,
4
+ )
5
+
6
+ import numpy as np
7
+ import pytest
8
+
9
+ import pandas as pd
10
+ from pandas import (
11
+ DataFrame,
12
+ Index,
13
+ MultiIndex,
14
+ Series,
15
+ bdate_range,
16
+ )
17
+ import pandas._testing as tm
18
+ from pandas.tests.groupby import get_groupby_method_args
19
+
20
+
21
def test_apply_func_that_appends_group_to_list_without_copy():
    """Groups captured by side effect inside apply() must reflect the real
    group data, even though the applied function returns nothing."""
    # GH: 17718

    df = DataFrame(1, index=list(range(10)) * 10, columns=[0]).reset_index()
    groups = []

    def store(group):
        # Side effect only; no return value.
        groups.append(group)

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        df.groupby("index").apply(store)
    expected_value = DataFrame(
        {"index": [0] * 10, 0: [1] * 10}, index=pd.RangeIndex(0, 100, 10)
    )

    tm.assert_frame_equal(groups[0], expected_value)


def test_apply_index_date(using_infer_string):
    """apply(idxmax) grouped by the index's dates matches groupby().idxmax().

    NOTE(review): ``using_infer_string`` is not referenced in the body —
    presumably requested only to exercise both string-inference modes.
    """
    # GH 5788
    ts = [
        "2011-05-16 00:00",
        "2011-05-16 01:00",
        "2011-05-16 02:00",
        "2011-05-16 03:00",
        "2011-05-17 02:00",
        "2011-05-17 03:00",
        "2011-05-17 04:00",
        "2011-05-17 05:00",
        "2011-05-18 02:00",
        "2011-05-18 03:00",
        "2011-05-18 04:00",
        "2011-05-18 05:00",
    ]
    df = DataFrame(
        {
            "value": [
                1.40893,
                1.40760,
                1.40750,
                1.40649,
                1.40893,
                1.40760,
                1.40750,
                1.40649,
                1.40893,
                1.40760,
                1.40750,
                1.40649,
            ],
        },
        index=Index(pd.to_datetime(ts), name="date_time"),
    )
    expected = df.groupby(df.index.date).idxmax()
    result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
    tm.assert_frame_equal(result, expected)
+
79
+
80
def test_apply_index_date_object(using_infer_string):
    """Date strings used as group keys must not be auto-coerced to datetime;
    the result index keeps the string/object dtype of the grouping column."""
    # GH 5789
    # don't auto coerce dates
    ts = [
        "2011-05-16 00:00",
        "2011-05-16 01:00",
        "2011-05-16 02:00",
        "2011-05-16 03:00",
        "2011-05-17 02:00",
        "2011-05-17 03:00",
        "2011-05-17 04:00",
        "2011-05-17 05:00",
        "2011-05-18 02:00",
        "2011-05-18 03:00",
        "2011-05-18 04:00",
        "2011-05-18 05:00",
    ]
    df = DataFrame([row.split() for row in ts], columns=["date", "time"])
    df["value"] = [
        1.40893,
        1.40760,
        1.40750,
        1.40649,
        1.40893,
        1.40760,
        1.40750,
        1.40649,
        1.40893,
        1.40760,
        1.40750,
        1.40649,
    ]
    # Expected index dtype depends on the string-inference mode under test.
    dtype = "string[pyarrow_numpy]" if using_infer_string else object
    exp_idx = Index(
        ["2011-05-16", "2011-05-17", "2011-05-18"], dtype=dtype, name="date"
    )
    expected = Series(["00:00", "02:00", "02:00"], index=exp_idx)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby("date", group_keys=False).apply(
            lambda x: x["time"][x["value"].idxmax()]
        )
    tm.assert_series_equal(result, expected)
+
124
+
125
def test_apply_trivial(using_infer_string):
    """apply() that ignores its input and returns a constant frame, under the
    (deprecated) axis=1 groupby."""
    # GH 20066
    # trivial apply: ignore input and return a constant dataframe.
    df = DataFrame(
        {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
        columns=["key", "data"],
    )
    # Concat keys mirror the per-dtype groups produced by the axis=1 groupby.
    dtype = "string" if using_infer_string else "object"
    expected = pd.concat([df.iloc[1:], df.iloc[1:]], axis=1, keys=["float64", dtype])

    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby([str(x) for x in df.dtypes], axis=1)
    result = gb.apply(lambda x: df.iloc[1:])

    tm.assert_frame_equal(result, expected)


def test_apply_trivial_fail(using_infer_string):
    """Same as test_apply_trivial but with group_keys=True, so the whole frame
    (not a slice) is repeated under each group key."""
    # GH 20066
    df = DataFrame(
        {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
        columns=["key", "data"],
    )
    dtype = "string" if using_infer_string else "object"
    expected = pd.concat([df, df], axis=1, keys=["float64", dtype])
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby([str(x) for x in df.dtypes], axis=1, group_keys=True)
    result = gb.apply(lambda x: df)

    tm.assert_frame_equal(result, expected)
+
158
+
159
@pytest.mark.parametrize(
    "df, group_names",
    [
        (DataFrame({"a": [1, 1, 1, 2, 3], "b": ["a", "a", "a", "b", "c"]}), [1, 2, 3]),
        (DataFrame({"a": [0, 0, 1, 1], "b": [0, 1, 0, 1]}), [0, 1]),
        (DataFrame({"a": [1]}), [1]),
        (DataFrame({"a": [1, 1, 1, 2, 2, 1, 1, 2], "b": range(8)}), [1, 2]),
        (DataFrame({"a": [1, 2, 3, 1, 2, 3], "two": [4, 5, 6, 7, 8, 9]}), [1, 2, 3]),
        (
            DataFrame(
                {
                    "a": list("aaabbbcccc"),
                    "B": [3, 4, 3, 6, 5, 2, 1, 9, 5, 4],
                    "C": [4, 0, 2, 2, 2, 7, 8, 6, 2, 8],
                }
            ),
            ["a", "b", "c"],
        ),
        (DataFrame([[1, 2, 3], [2, 2, 3]], columns=["a", "b", "c"]), [1, 2]),
    ],
    ids=[
        "GH2936",
        "GH7739 & GH10519",
        "GH10519",
        "GH2656",
        "GH12155",
        "GH20084",
        "GH21417",
    ],
)
def test_group_apply_once_per_group(df, group_names):
    """The applied function must be evaluated exactly once per group,
    regardless of what kind of object it returns."""
    # GH2936, GH7739, GH10519, GH2656, GH12155, GH20084, GH21417

    # This test should ensure that a function is only evaluated
    # once per group. Previously the function has been evaluated twice
    # on the first group to check if the Cython index slider is safe to use
    # This test ensures that the side effect (append to list) is only triggered
    # once per group

    names = []
    # cannot parameterize over the functions since they need external
    # `names` to detect side effects

    def f_copy(group):
        # this takes the fast apply path
        names.append(group.name)
        return group.copy()

    def f_nocopy(group):
        # this takes the slow apply path
        names.append(group.name)
        return group

    def f_scalar(group):
        # GH7739, GH2656
        names.append(group.name)
        return 0

    def f_none(group):
        # GH10519, GH12155, GH21417
        names.append(group.name)

    def f_constant_df(group):
        # GH2936, GH20084
        names.append(group.name)
        return DataFrame({"a": [1], "b": [1]})

    for func in [f_copy, f_nocopy, f_scalar, f_none, f_constant_df]:
        # Reset the side-effect log between functions.
        del names[:]

        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            df.groupby("a", group_keys=False).apply(func)
        assert names == group_names
+
234
+
235
def test_group_apply_once_per_group2(capsys):
    """Count stdout side effects to verify apply() runs once per group."""
    # GH: 31111
    # groupby-apply need to execute len(set(group_by_columns)) times

    expected = 2  # Number of times `apply` should call a function for the current test

    df = DataFrame(
        {
            "group_by_column": [0, 0, 0, 0, 1, 1, 1, 1],
            "test_column": ["0", "2", "4", "6", "8", "10", "12", "14"],
        },
        index=["0", "2", "4", "6", "8", "10", "12", "14"],
    )

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        df.groupby("group_by_column", group_keys=False).apply(
            lambda df: print("function_called")
        )

    result = capsys.readouterr().out.count("function_called")
    # If `groupby` behaves unexpectedly, this test will break
    assert result == expected


def test_apply_fast_slow_identical():
    """The fast (returns a copy) and slow (returns the input) apply paths
    must yield identical results."""
    # GH 31613

    df = DataFrame({"A": [0, 0, 1], "b": range(3)})

    # For simple index structures we check for fast/slow apply using
    # an identity check on in/output
    def slow(group):
        return group

    def fast(group):
        return group.copy()

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        fast_df = df.groupby("A", group_keys=False).apply(fast)
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        slow_df = df.groupby("A", group_keys=False).apply(slow)

    tm.assert_frame_equal(fast_df, slow_df)
+
281
+
282
@pytest.mark.parametrize(
    "func",
    [
        lambda x: x,
        lambda x: x[:],
        lambda x: x.copy(deep=False),
        lambda x: x.copy(deep=True),
    ],
)
def test_groupby_apply_identity_maybecopy_index_identical(func):
    """Identity-like apply functions must preserve the index regardless of
    whether they return the input itself or a (shallow/deep) copy."""
    # GH 14927
    # Whether the function returns a copy of the input data or not should not
    # have an impact on the index structure of the result since this is not
    # transparent to the user

    df = DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby("g", group_keys=False).apply(func)
    tm.assert_frame_equal(result, df)


def test_apply_with_mixed_dtype():
    """Mixed-dtype frames must keep their dtypes through apply, and
    as_index=False must not change the dtype of the grouping column."""
    # GH3480, apply with mixed dtype on axis=1 breaks in 0.11
    df = DataFrame(
        {
            "foo1": np.random.default_rng(2).standard_normal(6),
            "foo2": ["one", "two", "two", "three", "one", "two"],
        }
    )
    result = df.apply(lambda x: x, axis=1).dtypes
    expected = df.dtypes
    tm.assert_series_equal(result, expected)

    # GH 3610 incorrect dtype conversion with as_index=False
    df = DataFrame({"c1": [1, 2, 6, 6, 8]})
    df["c2"] = df.c1 / 2.0
    result1 = df.groupby("c2").mean().reset_index().c2
    result2 = df.groupby("c2", as_index=False).mean().c2
    tm.assert_series_equal(result1, result2)


def test_groupby_as_index_apply():
    """Index shape of head()/apply(head) results under as_index=True/False."""
    # GH #4648 and #3417
    df = DataFrame(
        {
            "item_id": ["b", "b", "a", "c", "a", "b"],
            "user_id": [1, 2, 1, 1, 3, 1],
            "time": range(6),
        }
    )

    g_as = df.groupby("user_id", as_index=True)
    g_not_as = df.groupby("user_id", as_index=False)

    # head() keeps the original (flat) index either way.
    res_as = g_as.head(2).index
    res_not_as = g_not_as.head(2).index
    exp = Index([0, 1, 2, 4])
    tm.assert_index_equal(res_as, exp)
    tm.assert_index_equal(res_not_as, exp)

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        res_as_apply = g_as.apply(lambda x: x.head(2)).index
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index

    # apply doesn't maintain the original ordering
    # changed in GH5610 as the as_index=False returns a MI here
    exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)])
    tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
    exp_as_apply = MultiIndex.from_tuples(tp, names=["user_id", None])

    tm.assert_index_equal(res_as_apply, exp_as_apply)
    tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)

    ind = Index(list("abcde"))
    df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        res = df.groupby(0, as_index=False, group_keys=False).apply(lambda x: x).index
    tm.assert_index_equal(res, ind)
+
366
+
367
def test_apply_concat_preserve_names(three_group):
    """Index names from per-group results survive (or are dropped when they
    conflict) in the concatenated apply() output."""
    grouped = three_group.groupby(["A", "B"])

    def desc(group):
        result = group.describe()
        result.index.name = "stat"
        return result

    def desc2(group):
        result = group.describe()
        result.index.name = "stat"
        result = result[: len(group)]
        # weirdo
        return result

    def desc3(group):
        result = group.describe()

        # names are different
        result.index.name = f"stat_{len(group):d}"

        result = result[: len(group)]
        # weirdo
        return result

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = grouped.apply(desc)
    assert result.index.names == ("A", "B", "stat")

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result2 = grouped.apply(desc2)
    assert result2.index.names == ("A", "B", "stat")

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result3 = grouped.apply(desc3)
    # Conflicting per-group names collapse to None.
    assert result3.index.names == ("A", "B", None)


def test_apply_series_to_frame():
    """A SeriesGroupBy apply that returns a DataFrame per group concatenates
    into an unnamed DataFrame with the original index."""

    def f(piece):
        with np.errstate(invalid="ignore"):
            logged = np.log(piece)
        return DataFrame(
            {"value": piece, "demeaned": piece - piece.mean(), "logged": logged}
        )

    dr = bdate_range("1/1/2000", periods=100)
    ts = Series(np.random.default_rng(2).standard_normal(100), index=dr)

    grouped = ts.groupby(lambda x: x.month, group_keys=False)
    result = grouped.apply(f)

    assert isinstance(result, DataFrame)
    assert not hasattr(result, "name")  # GH49907
    tm.assert_index_equal(result.index, ts.index)


def test_apply_series_yield_constant(df):
    """Scalar-returning apply on a selected column keeps the group-key names."""
    result = df.groupby(["A", "B"])["C"].apply(len)
    assert result.index.names[:2] == ("A", "B")


def test_apply_frame_yield_constant(df):
    """Scalar-returning apply on a frame yields an unnamed Series."""
    # GH13568
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby(["A", "B"]).apply(len)
    assert isinstance(result, Series)
    assert result.name is None

    result = df.groupby(["A", "B"])[["C", "D"]].apply(len)
    assert isinstance(result, Series)
    assert result.name is None


def test_apply_frame_to_series(df):
    """apply(len) matches count() both in index and in values."""
    grouped = df.groupby(["A", "B"])
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = grouped.apply(len)
    expected = grouped.count()["C"]
    tm.assert_index_equal(result.index, expected.index)
    tm.assert_numpy_array_equal(result.values, expected.values)
+
454
+
455
def test_apply_frame_not_as_index_column_name(df):
    """Scalar apply with as_index=False — exercises a _wrap_applied_output
    path not hit elsewhere."""
    # GH 35964 - path within _wrap_applied_output not hit by a test
    grouped = df.groupby(["A", "B"], as_index=False)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = grouped.apply(len)
    expected = grouped.count().rename(columns={"C": np.nan}).drop(columns="D")
    # TODO(GH#34306): Use assert_frame_equal when column name is not np.nan
    tm.assert_index_equal(result.index, expected.index)
    tm.assert_numpy_array_equal(result.values, expected.values)


def test_apply_frame_concat_series():
    """Nested groupby inside apply concatenates per-group Series into one
    Series named after the aggregated column."""

    def trans(group):
        return group.groupby("B")["C"].sum().sort_values().iloc[:2]

    def trans2(group):
        # Equivalent computation going through a column selection first.
        grouped = group.groupby(df.reindex(group.index)["B"])
        return grouped.sum().sort_values().iloc[:2]

    df = DataFrame(
        {
            "A": np.random.default_rng(2).integers(0, 5, 1000),
            "B": np.random.default_rng(2).integers(0, 5, 1000),
            "C": np.random.default_rng(2).standard_normal(1000),
        }
    )

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby("A").apply(trans)
    exp = df.groupby("A")["C"].apply(trans2)
    tm.assert_series_equal(result, exp, check_names=False)
    assert result.name == "C"


def test_apply_transform(ts):
    """Elementwise apply is equivalent to transform for a Series groupby."""
    grouped = ts.groupby(lambda x: x.month, group_keys=False)
    result = grouped.apply(lambda x: x * 2)
    expected = grouped.transform(lambda x: x * 2)
    tm.assert_series_equal(result, expected)


def test_apply_multikey_corner(tsframe):
    """Per-group slices of a multi-key apply match applying the function
    to each group directly."""
    grouped = tsframe.groupby(lambda x: x.year, lambda x: x.month) if False else tsframe.groupby([lambda x: x.year, lambda x: x.month])

    def f(group):
        return group.sort_values("A")[-5:]

    result = grouped.apply(f)
    for key, group in grouped:
        tm.assert_frame_equal(result.loc[key], f(group))


@pytest.mark.parametrize("group_keys", [True, False])
def test_apply_chunk_view(group_keys):
    """Slicing each group inside apply must not corrupt data (groups may be
    views into the parent frame)."""
    # Low level tinkering could be unsafe, make sure not
    df = DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby("key", group_keys=group_keys).apply(lambda x: x.iloc[:2])
    expected = df.take([0, 1, 3, 4, 6, 7])
    if group_keys:
        expected.index = MultiIndex.from_arrays(
            [[1, 1, 2, 2, 3, 3], expected.index], names=["key", None]
        )

    tm.assert_frame_equal(result, expected)
+
525
+
526
def test_apply_no_name_column_conflict():
    """A column literally named "name" must not clash with the group's
    ``.name`` attribute inside apply."""
    df = DataFrame(
        {
            "name": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
            "name2": [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
            "value": range(9, -1, -1),
        }
    )

    # it works! #2605
    grouped = df.groupby(["name", "name2"])
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        grouped.apply(lambda x: x.sort_values("value", inplace=True))


def test_apply_typecast_fail():
    """Adding a derived float column inside apply must not be mangled by
    over-eager dtype casting (flat index case)."""
    df = DataFrame(
        {
            "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
            "c": np.tile(["a", "b", "c"], 2),
            "v": np.arange(1.0, 7.0),
        }
    )

    def f(group):
        # Min-max normalize within the group, mutating the group in place.
        v = group["v"]
        group["v2"] = (v - v.min()) / (v.max() - v.min())
        return group

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby("d", group_keys=False).apply(f)

    expected = df.copy()
    expected["v2"] = np.tile([0.0, 0.5, 1], 2)

    tm.assert_frame_equal(result, expected)


def test_apply_multiindex_fail():
    """Same normalization as test_apply_typecast_fail, but with a MultiIndex
    on the input frame."""
    index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]])
    df = DataFrame(
        {
            "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
            "c": np.tile(["a", "b", "c"], 2),
            "v": np.arange(1.0, 7.0),
        },
        index=index,
    )

    def f(group):
        v = group["v"]
        group["v2"] = (v - v.min()) / (v.max() - v.min())
        return group

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby("d", group_keys=False).apply(f)

    expected = df.copy()
    expected["v2"] = np.tile([0.0, 0.5, 1], 2)

    tm.assert_frame_equal(result, expected)


def test_apply_corner(tsframe):
    """Elementwise apply over year groups is just the elementwise operation."""
    result = tsframe.groupby(lambda x: x.year, group_keys=False).apply(lambda x: x * 2)
    expected = tsframe * 2
    tm.assert_frame_equal(result, expected)
+
597
+
598
def test_apply_without_copy():
    """Returning a view (non-copy) from the applied function must give the
    same result as returning a copy."""
    # GH 5545
    # returning a non-copy in an applied function fails

    data = DataFrame(
        {
            "id_field": [100, 100, 200, 300],
            "category": ["a", "b", "c", "c"],
            "value": [1, 2, 3, 4],
        }
    )

    def filt1(x):
        # Returns a copy for singleton groups.
        if x.shape[0] == 1:
            return x.copy()
        else:
            return x[x.category == "c"]

    def filt2(x):
        # Returns the group itself (a view) for singleton groups.
        if x.shape[0] == 1:
            return x
        else:
            return x[x.category == "c"]

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        expected = data.groupby("id_field").apply(filt1)
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = data.groupby("id_field").apply(filt2)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("test_series", [True, False])
def test_apply_with_duplicated_non_sorted_axis(test_series):
    """Identity apply on a duplicated, unsorted index must keep all rows
    (order may differ, so compare after sorting)."""
    # GH 30667
    df = DataFrame(
        [["x", "p"], ["x", "p"], ["x", "o"]], columns=["X", "Y"], index=[1, 2, 2]
    )
    if test_series:
        ser = df.set_index("Y")["X"]
        result = ser.groupby(level=0, group_keys=False).apply(lambda x: x)

        # not expecting the order to remain the same for duplicated axis
        result = result.sort_index()
        expected = ser.sort_index()
        tm.assert_series_equal(result, expected)
    else:
        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            result = df.groupby("Y", group_keys=False).apply(lambda x: x)

        # not expecting the order to remain the same for duplicated axis
        result = result.sort_values("Y")
        expected = df.sort_values("Y")
        tm.assert_frame_equal(result, expected)


def test_apply_reindex_values():
    """reindex() on a single grouped column with duplicate indices must not
    raise (cannot reindex from duplicate axis)."""
    # GH: 26209
    # reindexing from a single column of a groupby object with duplicate indices caused
    # a ValueError (cannot reindex from duplicate axis) in 0.24.2, the problem was
    # solved in #30679
    values = [1, 2, 3, 4]
    indices = [1, 1, 2, 2]
    df = DataFrame({"group": ["Group1", "Group2"] * 2, "value": values}, index=indices)
    expected = Series(values, index=indices, name="value")

    def reindex_helper(x):
        return x.reindex(np.arange(x.index.min(), x.index.max() + 1))

    # the following group by raised a ValueError
    result = df.groupby("group", group_keys=False).value.apply(reindex_helper)
    tm.assert_series_equal(expected, result)


def test_apply_corner_cases():
    """Larger mixed-dtype frame exercising mutation inside apply (GH #535:
    can't use the sliding iterator)."""
    # #535, can't use sliding iterator

    N = 1000
    labels = np.random.default_rng(2).integers(0, 100, size=N)
    df = DataFrame(
        {
            "key": labels,
            "value1": np.random.default_rng(2).standard_normal(N),
            "value2": ["foo", "bar", "baz", "qux"] * (N // 4),
        }
    )

    grouped = df.groupby("key", group_keys=False)

    def f(g):
        g["value3"] = g["value1"] * 2
        return g

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = grouped.apply(f)
    assert "value3" in result
+
697
+
698
def test_apply_numeric_coercion_when_datetime():
    """Object columns must not be coerced to numeric just because a datetime
    column is present in the frame (GH 15670, GH 15421, GH 14423)."""
    # In the past, group-by/apply operations have been over-eager
    # in converting dtypes to numeric, in the presence of datetime
    # columns. Various GH issues were filed, the reproductions
    # for which are here.

    # GH 15670
    df = DataFrame(
        {"Number": [1, 2], "Date": ["2017-03-02"] * 2, "Str": ["foo", "inf"]}
    )
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        expected = df.groupby(["Number"]).apply(lambda x: x.iloc[0])
    df.Date = pd.to_datetime(df.Date)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby(["Number"]).apply(lambda x: x.iloc[0])
    # The "Str" column must be identical with or without the datetime column.
    tm.assert_series_equal(result["Str"], expected["Str"])

    # GH 15421
    df = DataFrame(
        {"A": [10, 20, 30], "B": ["foo", "3", "4"], "T": [pd.Timestamp("12:31:22")] * 3}
    )

    def get_B(g):
        return g.iloc[0][["B"]]

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df.groupby("A").apply(get_B)["B"]
    expected = df.B
    expected.index = df.A
    tm.assert_series_equal(result, expected)

    # GH 14423
    def predictions(tool):
        out = Series(index=["p1", "p2", "useTime"], dtype=object)
        if "step1" in list(tool.State):
            out["p1"] = str(tool[tool.State == "step1"].Machine.values[0])
        if "step2" in list(tool.State):
            out["p2"] = str(tool[tool.State == "step2"].Machine.values[0])
            out["useTime"] = str(tool[tool.State == "step2"].oTime.values[0])
        return out

    df1 = DataFrame(
        {
            "Key": ["B", "B", "A", "A"],
            "State": ["step1", "step2", "step1", "step2"],
            "oTime": ["", "2016-09-19 05:24:33", "", "2016-09-19 23:59:04"],
            "Machine": ["23", "36L", "36R", "36R"],
        }
    )
    df2 = df1.copy()
    df2.oTime = pd.to_datetime(df2.oTime)
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        expected = df1.groupby("Key").apply(predictions).p1
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df2.groupby("Key").apply(predictions).p1
    tm.assert_series_equal(expected, result)
+
759
+
760
+ def test_apply_aggregating_timedelta_and_datetime():
761
+ # Regression test for GH 15562
762
+ # The following groupby caused ValueErrors and IndexErrors pre 0.20.0
763
+
764
+ df = DataFrame(
765
+ {
766
+ "clientid": ["A", "B", "C"],
767
+ "datetime": [np.datetime64("2017-02-01 00:00:00")] * 3,
768
+ }
769
+ )
770
+ df["time_delta_zero"] = df.datetime - df.datetime
771
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
772
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
773
+ result = df.groupby("clientid").apply(
774
+ lambda ddf: Series(
775
+ {"clientid_age": ddf.time_delta_zero.min(), "date": ddf.datetime.min()}
776
+ )
777
+ )
778
+ expected = DataFrame(
779
+ {
780
+ "clientid": ["A", "B", "C"],
781
+ "clientid_age": [np.timedelta64(0, "D")] * 3,
782
+ "date": [np.datetime64("2017-02-01 00:00:00")] * 3,
783
+ }
784
+ ).set_index("clientid")
785
+
786
+ tm.assert_frame_equal(result, expected)
787
+
788
+
789
+ def test_apply_groupby_datetimeindex():
790
+ # GH 26182
791
+ # groupby apply failed on dataframe with DatetimeIndex
792
+
793
+ data = [["A", 10], ["B", 20], ["B", 30], ["C", 40], ["C", 50]]
794
+ df = DataFrame(
795
+ data, columns=["Name", "Value"], index=pd.date_range("2020-09-01", "2020-09-05")
796
+ )
797
+
798
+ result = df.groupby("Name").sum()
799
+
800
+ expected = DataFrame({"Name": ["A", "B", "C"], "Value": [10, 50, 90]})
801
+ expected.set_index("Name", inplace=True)
802
+
803
+ tm.assert_frame_equal(result, expected)
804
+
805
+
806
+ def test_time_field_bug():
807
+ # Test a fix for the following error related to GH issue 11324 When
808
+ # non-key fields in a group-by dataframe contained time-based fields
809
+ # that were not returned by the apply function, an exception would be
810
+ # raised.
811
+
812
+ df = DataFrame({"a": 1, "b": [datetime.now() for nn in range(10)]})
813
+
814
+ def func_with_no_date(batch):
815
+ return Series({"c": 2})
816
+
817
+ def func_with_date(batch):
818
+ return Series({"b": datetime(2015, 1, 1), "c": 2})
819
+
820
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
821
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
822
+ dfg_no_conversion = df.groupby(by=["a"]).apply(func_with_no_date)
823
+ dfg_no_conversion_expected = DataFrame({"c": 2}, index=[1])
824
+ dfg_no_conversion_expected.index.name = "a"
825
+
826
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
827
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
828
+ dfg_conversion = df.groupby(by=["a"]).apply(func_with_date)
829
+ dfg_conversion_expected = DataFrame(
830
+ {"b": pd.Timestamp(2015, 1, 1).as_unit("ns"), "c": 2}, index=[1]
831
+ )
832
+ dfg_conversion_expected.index.name = "a"
833
+
834
+ tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
835
+ tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
836
+
837
+
838
+ def test_gb_apply_list_of_unequal_len_arrays():
839
+ # GH1738
840
+ df = DataFrame(
841
+ {
842
+ "group1": ["a", "a", "a", "b", "b", "b", "a", "a", "a", "b", "b", "b"],
843
+ "group2": ["c", "c", "d", "d", "d", "e", "c", "c", "d", "d", "d", "e"],
844
+ "weight": [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
845
+ "value": [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3],
846
+ }
847
+ )
848
+ df = df.set_index(["group1", "group2"])
849
+ df_grouped = df.groupby(level=["group1", "group2"], sort=True)
850
+
851
+ def noddy(value, weight):
852
+ out = np.array(value * weight).repeat(3)
853
+ return out
854
+
855
+ # the kernel function returns arrays of unequal length
856
+ # pandas sniffs the first one, sees it's an array and not
857
+ # a list, and assumed the rest are of equal length
858
+ # and so tries a vstack
859
+
860
+ # don't die
861
+ df_grouped.apply(lambda x: noddy(x.value, x.weight))
862
+
863
+
864
+ def test_groupby_apply_all_none():
865
+ # Tests to make sure no errors if apply function returns all None
866
+ # values. Issue 9684.
867
+ test_df = DataFrame({"groups": [0, 0, 1, 1], "random_vars": [8, 7, 4, 5]})
868
+
869
+ def test_func(x):
870
+ pass
871
+
872
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
873
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
874
+ result = test_df.groupby("groups").apply(test_func)
875
+ expected = DataFrame()
876
+ tm.assert_frame_equal(result, expected)
877
+
878
+
879
+ def test_groupby_apply_none_first():
880
+ # GH 12824. Tests if apply returns None first.
881
+ test_df1 = DataFrame({"groups": [1, 1, 1, 2], "vars": [0, 1, 2, 3]})
882
+ test_df2 = DataFrame({"groups": [1, 2, 2, 2], "vars": [0, 1, 2, 3]})
883
+
884
+ def test_func(x):
885
+ if x.shape[0] < 2:
886
+ return None
887
+ return x.iloc[[0, -1]]
888
+
889
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
890
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
891
+ result1 = test_df1.groupby("groups").apply(test_func)
892
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
893
+ result2 = test_df2.groupby("groups").apply(test_func)
894
+ index1 = MultiIndex.from_arrays([[1, 1], [0, 2]], names=["groups", None])
895
+ index2 = MultiIndex.from_arrays([[2, 2], [1, 3]], names=["groups", None])
896
+ expected1 = DataFrame({"groups": [1, 1], "vars": [0, 2]}, index=index1)
897
+ expected2 = DataFrame({"groups": [2, 2], "vars": [1, 3]}, index=index2)
898
+ tm.assert_frame_equal(result1, expected1)
899
+ tm.assert_frame_equal(result2, expected2)
900
+
901
+
902
+ def test_groupby_apply_return_empty_chunk():
903
+ # GH 22221: apply filter which returns some empty groups
904
+ df = DataFrame({"value": [0, 1], "group": ["filled", "empty"]})
905
+ groups = df.groupby("group")
906
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
907
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
908
+ result = groups.apply(lambda group: group[group.value != 1]["value"])
909
+ expected = Series(
910
+ [0],
911
+ name="value",
912
+ index=MultiIndex.from_product(
913
+ [["empty", "filled"], [0]], names=["group", None]
914
+ ).drop("empty"),
915
+ )
916
+ tm.assert_series_equal(result, expected)
917
+
918
+
919
+ def test_apply_with_mixed_types():
920
+ # gh-20949
921
+ df = DataFrame({"A": "a a b".split(), "B": [1, 2, 3], "C": [4, 6, 5]})
922
+ g = df.groupby("A", group_keys=False)
923
+
924
+ result = g.transform(lambda x: x / x.sum())
925
+ expected = DataFrame({"B": [1 / 3.0, 2 / 3.0, 1], "C": [0.4, 0.6, 1.0]})
926
+ tm.assert_frame_equal(result, expected)
927
+
928
+ result = g.apply(lambda x: x / x.sum())
929
+ tm.assert_frame_equal(result, expected)
930
+
931
+
932
+ def test_func_returns_object():
933
+ # GH 28652
934
+ df = DataFrame({"a": [1, 2]}, index=Index([1, 2]))
935
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
936
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
937
+ result = df.groupby("a").apply(lambda g: g.index)
938
+ expected = Series([Index([1]), Index([2])], index=Index([1, 2], name="a"))
939
+
940
+ tm.assert_series_equal(result, expected)
941
+
942
+
943
+ @pytest.mark.parametrize(
944
+ "group_column_dtlike",
945
+ [datetime.today(), datetime.today().date(), datetime.today().time()],
946
+ )
947
+ def test_apply_datetime_issue(group_column_dtlike, using_infer_string):
948
+ # GH-28247
949
+ # groupby-apply throws an error if one of the columns in the DataFrame
950
+ # is a datetime object and the column labels are different from
951
+ # standard int values in range(len(num_columns))
952
+
953
+ df = DataFrame({"a": ["foo"], "b": [group_column_dtlike]})
954
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
955
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
956
+ result = df.groupby("a").apply(lambda x: Series(["spam"], index=[42]))
957
+
958
+ dtype = "string" if using_infer_string else "object"
959
+ expected = DataFrame(["spam"], Index(["foo"], dtype=dtype, name="a"), columns=[42])
960
+ tm.assert_frame_equal(result, expected)
961
+
962
+
963
+ def test_apply_series_return_dataframe_groups():
964
+ # GH 10078
965
+ tdf = DataFrame(
966
+ {
967
+ "day": {
968
+ 0: pd.Timestamp("2015-02-24 00:00:00"),
969
+ 1: pd.Timestamp("2015-02-24 00:00:00"),
970
+ 2: pd.Timestamp("2015-02-24 00:00:00"),
971
+ 3: pd.Timestamp("2015-02-24 00:00:00"),
972
+ 4: pd.Timestamp("2015-02-24 00:00:00"),
973
+ },
974
+ "userAgent": {
975
+ 0: "some UA string",
976
+ 1: "some UA string",
977
+ 2: "some UA string",
978
+ 3: "another UA string",
979
+ 4: "some UA string",
980
+ },
981
+ "userId": {
982
+ 0: "17661101",
983
+ 1: "17661101",
984
+ 2: "17661101",
985
+ 3: "17661101",
986
+ 4: "17661101",
987
+ },
988
+ }
989
+ )
990
+
991
+ def most_common_values(df):
992
+ return Series({c: s.value_counts().index[0] for c, s in df.items()})
993
+
994
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
995
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
996
+ result = tdf.groupby("day").apply(most_common_values)["userId"]
997
+ expected = Series(
998
+ ["17661101"], index=pd.DatetimeIndex(["2015-02-24"], name="day"), name="userId"
999
+ )
1000
+ tm.assert_series_equal(result, expected)
1001
+
1002
+
1003
+ @pytest.mark.parametrize("category", [False, True])
1004
+ def test_apply_multi_level_name(category):
1005
+ # https://github.com/pandas-dev/pandas/issues/31068
1006
+ b = [1, 2] * 5
1007
+ if category:
1008
+ b = pd.Categorical(b, categories=[1, 2, 3])
1009
+ expected_index = pd.CategoricalIndex([1, 2, 3], categories=[1, 2, 3], name="B")
1010
+ expected_values = [20, 25, 0]
1011
+ else:
1012
+ expected_index = Index([1, 2], name="B")
1013
+ expected_values = [20, 25]
1014
+ expected = DataFrame(
1015
+ {"C": expected_values, "D": expected_values}, index=expected_index
1016
+ )
1017
+
1018
+ df = DataFrame(
1019
+ {"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))}
1020
+ ).set_index(["A", "B"])
1021
+ result = df.groupby("B", observed=False).apply(lambda x: x.sum())
1022
+ tm.assert_frame_equal(result, expected)
1023
+ assert df.index.names == ["A", "B"]
1024
+
1025
+
1026
+ def test_groupby_apply_datetime_result_dtypes(using_infer_string):
1027
+ # GH 14849
1028
+ data = DataFrame.from_records(
1029
+ [
1030
+ (pd.Timestamp(2016, 1, 1), "red", "dark", 1, "8"),
1031
+ (pd.Timestamp(2015, 1, 1), "green", "stormy", 2, "9"),
1032
+ (pd.Timestamp(2014, 1, 1), "blue", "bright", 3, "10"),
1033
+ (pd.Timestamp(2013, 1, 1), "blue", "calm", 4, "potato"),
1034
+ ],
1035
+ columns=["observation", "color", "mood", "intensity", "score"],
1036
+ )
1037
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1038
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1039
+ result = data.groupby("color").apply(lambda g: g.iloc[0]).dtypes
1040
+ dtype = "string" if using_infer_string else object
1041
+ expected = Series(
1042
+ [np.dtype("datetime64[ns]"), dtype, dtype, np.int64, dtype],
1043
+ index=["observation", "color", "mood", "intensity", "score"],
1044
+ )
1045
+ tm.assert_series_equal(result, expected)
1046
+
1047
+
1048
+ @pytest.mark.parametrize(
1049
+ "index",
1050
+ [
1051
+ pd.CategoricalIndex(list("abc")),
1052
+ pd.interval_range(0, 3),
1053
+ pd.period_range("2020", periods=3, freq="D"),
1054
+ MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
1055
+ ],
1056
+ )
1057
+ def test_apply_index_has_complex_internals(index):
1058
+ # GH 31248
1059
+ df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
1060
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1061
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1062
+ result = df.groupby("group", group_keys=False).apply(lambda x: x)
1063
+ tm.assert_frame_equal(result, df)
1064
+
1065
+
1066
+ @pytest.mark.parametrize(
1067
+ "function, expected_values",
1068
+ [
1069
+ (lambda x: x.index.to_list(), [[0, 1], [2, 3]]),
1070
+ (lambda x: set(x.index.to_list()), [{0, 1}, {2, 3}]),
1071
+ (lambda x: tuple(x.index.to_list()), [(0, 1), (2, 3)]),
1072
+ (
1073
+ lambda x: dict(enumerate(x.index.to_list())),
1074
+ [{0: 0, 1: 1}, {0: 2, 1: 3}],
1075
+ ),
1076
+ (
1077
+ lambda x: [{n: i} for (n, i) in enumerate(x.index.to_list())],
1078
+ [[{0: 0}, {1: 1}], [{0: 2}, {1: 3}]],
1079
+ ),
1080
+ ],
1081
+ )
1082
+ def test_apply_function_returns_non_pandas_non_scalar(function, expected_values):
1083
+ # GH 31441
1084
+ df = DataFrame(["A", "A", "B", "B"], columns=["groups"])
1085
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1086
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1087
+ result = df.groupby("groups").apply(function)
1088
+ expected = Series(expected_values, index=Index(["A", "B"], name="groups"))
1089
+ tm.assert_series_equal(result, expected)
1090
+
1091
+
1092
+ def test_apply_function_returns_numpy_array():
1093
+ # GH 31605
1094
+ def fct(group):
1095
+ return group["B"].values.flatten()
1096
+
1097
+ df = DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]})
1098
+
1099
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1100
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1101
+ result = df.groupby("A").apply(fct)
1102
+ expected = Series(
1103
+ [[1.0, 2.0], [3.0], [np.nan]], index=Index(["a", "b", "none"], name="A")
1104
+ )
1105
+ tm.assert_series_equal(result, expected)
1106
+
1107
+
1108
+ @pytest.mark.parametrize("function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1])
1109
+ def test_apply_function_index_return(function):
1110
+ # GH: 22541
1111
+ df = DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"])
1112
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1113
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1114
+ result = df.groupby("id").apply(function)
1115
+ expected = Series(
1116
+ [Index([0, 4, 7, 9]), Index([1, 2, 3, 5]), Index([6, 8])],
1117
+ index=Index([1, 2, 3], name="id"),
1118
+ )
1119
+ tm.assert_series_equal(result, expected)
1120
+
1121
+
1122
+ def test_apply_function_with_indexing_return_column():
1123
+ # GH#7002, GH#41480, GH#49256
1124
+ df = DataFrame(
1125
+ {
1126
+ "foo1": ["one", "two", "two", "three", "one", "two"],
1127
+ "foo2": [1, 2, 4, 4, 5, 6],
1128
+ }
1129
+ )
1130
+ result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean())
1131
+ expected = DataFrame(
1132
+ {
1133
+ "foo1": ["one", "three", "two"],
1134
+ "foo2": [3.0, 4.0, 4.0],
1135
+ }
1136
+ )
1137
+ tm.assert_frame_equal(result, expected)
1138
+
1139
+
1140
+ @pytest.mark.parametrize(
1141
+ "udf",
1142
+ [(lambda x: x.copy()), (lambda x: x.copy().rename(lambda y: y + 1))],
1143
+ )
1144
+ @pytest.mark.parametrize("group_keys", [True, False])
1145
+ def test_apply_result_type(group_keys, udf):
1146
+ # https://github.com/pandas-dev/pandas/issues/34809
1147
+ # We'd like to control whether the group keys end up in the index
1148
+ # regardless of whether the UDF happens to be a transform.
1149
+ df = DataFrame({"A": ["a", "b"], "B": [1, 2]})
1150
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1151
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1152
+ df_result = df.groupby("A", group_keys=group_keys).apply(udf)
1153
+ series_result = df.B.groupby(df.A, group_keys=group_keys).apply(udf)
1154
+
1155
+ if group_keys:
1156
+ assert df_result.index.nlevels == 2
1157
+ assert series_result.index.nlevels == 2
1158
+ else:
1159
+ assert df_result.index.nlevels == 1
1160
+ assert series_result.index.nlevels == 1
1161
+
1162
+
1163
+ def test_result_order_group_keys_false():
1164
+ # GH 34998
1165
+ # apply result order should not depend on whether index is the same or just equal
1166
+ df = DataFrame({"A": [2, 1, 2], "B": [1, 2, 3]})
1167
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1168
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1169
+ result = df.groupby("A", group_keys=False).apply(lambda x: x)
1170
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1171
+ expected = df.groupby("A", group_keys=False).apply(lambda x: x.copy())
1172
+ tm.assert_frame_equal(result, expected)
1173
+
1174
+
1175
+ def test_apply_with_timezones_aware():
1176
+ # GH: 27212
1177
+ dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2
1178
+ index_no_tz = pd.DatetimeIndex(dates)
1179
+ index_tz = pd.DatetimeIndex(dates, tz="UTC")
1180
+ df1 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz})
1181
+ df2 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz})
1182
+
1183
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1184
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1185
+ result1 = df1.groupby("x", group_keys=False).apply(
1186
+ lambda df: df[["x", "y"]].copy()
1187
+ )
1188
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1189
+ result2 = df2.groupby("x", group_keys=False).apply(
1190
+ lambda df: df[["x", "y"]].copy()
1191
+ )
1192
+
1193
+ tm.assert_frame_equal(result1, result2)
1194
+
1195
+
1196
+ def test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func):
1197
+ # GH #34656
1198
+ # GH #34271
1199
+ df = DataFrame(
1200
+ {
1201
+ "a": [99, 99, 99, 88, 88, 88],
1202
+ "b": [1, 2, 3, 4, 5, 6],
1203
+ "c": [10, 20, 30, 40, 50, 60],
1204
+ }
1205
+ )
1206
+
1207
+ expected = DataFrame(
1208
+ {"b": [15, 6], "c": [150, 60]},
1209
+ index=Index([88, 99], name="a"),
1210
+ )
1211
+
1212
+ # Check output when no other methods are called before .apply()
1213
+ grp = df.groupby(by="a")
1214
+ msg = "The behavior of DataFrame.sum with axis=None is deprecated"
1215
+ with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
1216
+ result = grp.apply(sum, include_groups=False)
1217
+ tm.assert_frame_equal(result, expected)
1218
+
1219
+ # Check output when another method is called before .apply()
1220
+ grp = df.groupby(by="a")
1221
+ args = get_groupby_method_args(reduction_func, df)
1222
+ _ = getattr(grp, reduction_func)(*args)
1223
+ with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
1224
+ result = grp.apply(sum, include_groups=False)
1225
+ tm.assert_frame_equal(result, expected)
1226
+
1227
+
1228
+ def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp():
1229
+ # GH 29617
1230
+
1231
+ df = DataFrame(
1232
+ {
1233
+ "A": ["a", "a", "a", "b"],
1234
+ "B": [
1235
+ date(2020, 1, 10),
1236
+ date(2020, 1, 10),
1237
+ date(2020, 2, 10),
1238
+ date(2020, 2, 10),
1239
+ ],
1240
+ "C": [1, 2, 3, 4],
1241
+ },
1242
+ index=Index([100, 101, 102, 103], name="idx"),
1243
+ )
1244
+
1245
+ grp = df.groupby(["A", "B"])
1246
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1247
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1248
+ result = grp.apply(lambda x: x.head(1))
1249
+
1250
+ expected = df.iloc[[0, 2, 3]]
1251
+ expected = expected.reset_index()
1252
+ expected.index = MultiIndex.from_frame(expected[["A", "B", "idx"]])
1253
+ expected = expected.drop(columns="idx")
1254
+
1255
+ tm.assert_frame_equal(result, expected)
1256
+ for val in result.index.levels[1]:
1257
+ assert type(val) is date
1258
+
1259
+
1260
+ def test_apply_by_cols_equals_apply_by_rows_transposed():
1261
+ # GH 16646
1262
+ # Operating on the columns, or transposing and operating on the rows
1263
+ # should give the same result. There was previously a bug where the
1264
+ # by_rows operation would work fine, but by_cols would throw a ValueError
1265
+
1266
+ df = DataFrame(
1267
+ np.random.default_rng(2).random([6, 4]),
1268
+ columns=MultiIndex.from_product([["A", "B"], [1, 2]]),
1269
+ )
1270
+
1271
+ msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
1272
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1273
+ gb = df.T.groupby(axis=0, level=0)
1274
+ by_rows = gb.apply(lambda x: x.droplevel(axis=0, level=0))
1275
+
1276
+ msg = "DataFrame.groupby with axis=1 is deprecated"
1277
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1278
+ gb2 = df.groupby(axis=1, level=0)
1279
+ by_cols = gb2.apply(lambda x: x.droplevel(axis=1, level=0))
1280
+
1281
+ tm.assert_frame_equal(by_cols, by_rows.T)
1282
+ tm.assert_frame_equal(by_cols, df)
1283
+
1284
+
1285
+ @pytest.mark.parametrize("dropna", [True, False])
1286
+ def test_apply_dropna_with_indexed_same(dropna):
1287
+ # GH 38227
1288
+ # GH#43205
1289
+ df = DataFrame(
1290
+ {
1291
+ "col": [1, 2, 3, 4, 5],
1292
+ "group": ["a", np.nan, np.nan, "b", "b"],
1293
+ },
1294
+ index=list("xxyxz"),
1295
+ )
1296
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1297
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1298
+ result = df.groupby("group", dropna=dropna, group_keys=False).apply(lambda x: x)
1299
+ expected = df.dropna() if dropna else df.iloc[[0, 3, 1, 2, 4]]
1300
+ tm.assert_frame_equal(result, expected)
1301
+
1302
+
1303
+ @pytest.mark.parametrize(
1304
+ "as_index, expected",
1305
+ [
1306
+ [
1307
+ False,
1308
+ DataFrame(
1309
+ [[1, 1, 1], [2, 2, 1]], columns=Index(["a", "b", None], dtype=object)
1310
+ ),
1311
+ ],
1312
+ [
1313
+ True,
1314
+ Series(
1315
+ [1, 1], index=MultiIndex.from_tuples([(1, 1), (2, 2)], names=["a", "b"])
1316
+ ),
1317
+ ],
1318
+ ],
1319
+ )
1320
+ def test_apply_as_index_constant_lambda(as_index, expected):
1321
+ # GH 13217
1322
+ df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 1, 2, 2], "c": [1, 1, 1, 1]})
1323
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1324
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1325
+ result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1)
1326
+ tm.assert_equal(result, expected)
1327
+
1328
+
1329
+ def test_sort_index_groups():
1330
+ # GH 20420
1331
+ df = DataFrame(
1332
+ {"A": [1, 2, 3, 4, 5], "B": [6, 7, 8, 9, 0], "C": [1, 1, 1, 2, 2]},
1333
+ index=range(5),
1334
+ )
1335
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1336
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1337
+ result = df.groupby("C").apply(lambda x: x.A.sort_index())
1338
+ expected = Series(
1339
+ range(1, 6),
1340
+ index=MultiIndex.from_tuples(
1341
+ [(1, 0), (1, 1), (1, 2), (2, 3), (2, 4)], names=["C", None]
1342
+ ),
1343
+ name="A",
1344
+ )
1345
+ tm.assert_series_equal(result, expected)
1346
+
1347
+
1348
+ def test_positional_slice_groups_datetimelike():
1349
+ # GH 21651
1350
+ expected = DataFrame(
1351
+ {
1352
+ "date": pd.date_range("2010-01-01", freq="12h", periods=5),
1353
+ "vals": range(5),
1354
+ "let": list("abcde"),
1355
+ }
1356
+ )
1357
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1358
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1359
+ result = expected.groupby(
1360
+ [expected.let, expected.date.dt.date], group_keys=False
1361
+ ).apply(lambda x: x.iloc[0:])
1362
+ tm.assert_frame_equal(result, expected)
1363
+
1364
+
1365
+ def test_groupby_apply_shape_cache_safety():
1366
+ # GH#42702 this fails if we cache_readonly Block.shape
1367
+ df = DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 3], "C": [4, 6, 5]})
1368
+ gb = df.groupby("A")
1369
+ result = gb[["B", "C"]].apply(lambda x: x.astype(float).max() - x.min())
1370
+
1371
+ expected = DataFrame(
1372
+ {"B": [1.0, 0.0], "C": [2.0, 0.0]}, index=Index(["a", "b"], name="A")
1373
+ )
1374
+ tm.assert_frame_equal(result, expected)
1375
+
1376
+
1377
+ def test_groupby_apply_to_series_name():
1378
+ # GH52444
1379
+ df = DataFrame.from_dict(
1380
+ {
1381
+ "a": ["a", "b", "a", "b"],
1382
+ "b1": ["aa", "ac", "ac", "ad"],
1383
+ "b2": ["aa", "aa", "aa", "ac"],
1384
+ }
1385
+ )
1386
+ grp = df.groupby("a")[["b1", "b2"]]
1387
+ result = grp.apply(lambda x: x.unstack().value_counts())
1388
+
1389
+ expected_idx = MultiIndex.from_arrays(
1390
+ arrays=[["a", "a", "b", "b", "b"], ["aa", "ac", "ac", "ad", "aa"]],
1391
+ names=["a", None],
1392
+ )
1393
+ expected = Series([3, 1, 2, 1, 1], index=expected_idx, name="count")
1394
+ tm.assert_series_equal(result, expected)
1395
+
1396
+
1397
+ @pytest.mark.parametrize("dropna", [True, False])
1398
+ def test_apply_na(dropna):
1399
+ # GH#28984
1400
+ df = DataFrame(
1401
+ {"grp": [1, 1, 2, 2], "y": [1, 0, 2, 5], "z": [1, 2, np.nan, np.nan]}
1402
+ )
1403
+ dfgrp = df.groupby("grp", dropna=dropna)
1404
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1405
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1406
+ result = dfgrp.apply(lambda grp_df: grp_df.nlargest(1, "z"))
1407
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1408
+ expected = dfgrp.apply(lambda x: x.sort_values("z", ascending=False).head(1))
1409
+ tm.assert_frame_equal(result, expected)
1410
+
1411
+
1412
+ def test_apply_empty_string_nan_coerce_bug():
1413
+ # GH#24903
1414
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1415
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1416
+ result = (
1417
+ DataFrame(
1418
+ {
1419
+ "a": [1, 1, 2, 2],
1420
+ "b": ["", "", "", ""],
1421
+ "c": pd.to_datetime([1, 2, 3, 4], unit="s"),
1422
+ }
1423
+ )
1424
+ .groupby(["a", "b"])
1425
+ .apply(lambda df: df.iloc[-1])
1426
+ )
1427
+ expected = DataFrame(
1428
+ [[1, "", pd.to_datetime(2, unit="s")], [2, "", pd.to_datetime(4, unit="s")]],
1429
+ columns=["a", "b", "c"],
1430
+ index=MultiIndex.from_tuples([(1, ""), (2, "")], names=["a", "b"]),
1431
+ )
1432
+ tm.assert_frame_equal(result, expected)
1433
+
1434
+
1435
+ @pytest.mark.parametrize("index_values", [[1, 2, 3], [1.0, 2.0, 3.0]])
1436
+ def test_apply_index_key_error_bug(index_values):
1437
+ # GH 44310
1438
+ result = DataFrame(
1439
+ {
1440
+ "a": ["aa", "a2", "a3"],
1441
+ "b": [1, 2, 3],
1442
+ },
1443
+ index=Index(index_values),
1444
+ )
1445
+ expected = DataFrame(
1446
+ {
1447
+ "b_mean": [2.0, 3.0, 1.0],
1448
+ },
1449
+ index=Index(["a2", "a3", "aa"], name="a"),
1450
+ )
1451
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1452
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1453
+ result = result.groupby("a").apply(
1454
+ lambda df: Series([df["b"].mean()], index=["b_mean"])
1455
+ )
1456
+ tm.assert_frame_equal(result, expected)
1457
+
1458
+
1459
+ @pytest.mark.parametrize(
1460
+ "arg,idx",
1461
+ [
1462
+ [
1463
+ [
1464
+ 1,
1465
+ 2,
1466
+ 3,
1467
+ ],
1468
+ [
1469
+ 0.1,
1470
+ 0.3,
1471
+ 0.2,
1472
+ ],
1473
+ ],
1474
+ [
1475
+ [
1476
+ 1,
1477
+ 2,
1478
+ 3,
1479
+ ],
1480
+ [
1481
+ 0.1,
1482
+ 0.2,
1483
+ 0.3,
1484
+ ],
1485
+ ],
1486
+ [
1487
+ [
1488
+ 1,
1489
+ 4,
1490
+ 3,
1491
+ ],
1492
+ [
1493
+ 0.1,
1494
+ 0.4,
1495
+ 0.2,
1496
+ ],
1497
+ ],
1498
+ ],
1499
+ )
1500
+ def test_apply_nonmonotonic_float_index(arg, idx):
1501
+ # GH 34455
1502
+ expected = DataFrame({"col": arg}, index=idx)
1503
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1504
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1505
+ result = expected.groupby("col", group_keys=False).apply(lambda x: x)
1506
+ tm.assert_frame_equal(result, expected)
1507
+
1508
+
1509
+ @pytest.mark.parametrize("args, kwargs", [([True], {}), ([], {"numeric_only": True})])
1510
+ def test_apply_str_with_args(df, args, kwargs):
1511
+ # GH#46479
1512
+ gb = df.groupby("A")
1513
+ result = gb.apply("sum", *args, **kwargs)
1514
+ expected = gb.sum(numeric_only=True)
1515
+ tm.assert_frame_equal(result, expected)
1516
+
1517
+
1518
+ @pytest.mark.parametrize("name", ["some_name", None])
1519
+ def test_result_name_when_one_group(name):
1520
+ # GH 46369
1521
+ ser = Series([1, 2], name=name)
1522
+ result = ser.groupby(["a", "a"], group_keys=False).apply(lambda x: x)
1523
+ expected = Series([1, 2], name=name)
1524
+
1525
+ tm.assert_series_equal(result, expected)
1526
+
1527
+
1528
+ @pytest.mark.parametrize(
1529
+ "method, op",
1530
+ [
1531
+ ("apply", lambda gb: gb.values[-1]),
1532
+ ("apply", lambda gb: gb["b"].iloc[0]),
1533
+ ("agg", "skew"),
1534
+ ("agg", "prod"),
1535
+ ("agg", "sum"),
1536
+ ],
1537
+ )
1538
+ def test_empty_df(method, op):
1539
+ # GH 47985
1540
+ empty_df = DataFrame({"a": [], "b": []})
1541
+ gb = empty_df.groupby("a", group_keys=True)
1542
+ group = getattr(gb, "b")
1543
+
1544
+ result = getattr(group, method)(op)
1545
+ expected = Series(
1546
+ [], name="b", dtype="float64", index=Index([], dtype="float64", name="a")
1547
+ )
1548
+
1549
+ tm.assert_series_equal(result, expected)
1550
+
1551
+
1552
+ @pytest.mark.parametrize("include_groups", [True, False])
1553
+ def test_include_groups(include_groups):
1554
+ # GH#7155
1555
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
1556
+ gb = df.groupby("a")
1557
+ warn = DeprecationWarning if include_groups else None
1558
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1559
+ with tm.assert_produces_warning(warn, match=msg):
1560
+ result = gb.apply(lambda x: x.sum(), include_groups=include_groups)
1561
+ expected = DataFrame({"a": [2, 2], "b": [7, 5]}, index=Index([1, 2], name="a"))
1562
+ if not include_groups:
1563
+ expected = expected[["b"]]
1564
+ tm.assert_frame_equal(result, expected)
1565
+
1566
+
1567
+ @pytest.mark.parametrize("f", [max, min, sum])
1568
+ @pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
1569
+ def test_builtins_apply(keys, f):
1570
+ # see gh-8155
1571
+ rs = np.random.default_rng(2)
1572
+ df = DataFrame(rs.integers(1, 7, (10, 2)), columns=["jim", "joe"])
1573
+ df["jolie"] = rs.standard_normal(10)
1574
+
1575
+ gb = df.groupby(keys)
1576
+
1577
+ fname = f.__name__
1578
+
1579
+ warn = None if f is not sum else FutureWarning
1580
+ msg = "The behavior of DataFrame.sum with axis=None is deprecated"
1581
+ with tm.assert_produces_warning(
1582
+ warn, match=msg, check_stacklevel=False, raise_on_extra_warnings=False
1583
+ ):
1584
+ # Also warns on deprecation GH#53425
1585
+ result = gb.apply(f)
1586
+ ngroups = len(df.drop_duplicates(subset=keys))
1587
+
1588
+ assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
1589
+ assert result.shape == (ngroups, 3), assert_msg
1590
+
1591
+ npfunc = lambda x: getattr(np, fname)(x, axis=0) # numpy's equivalent function
1592
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
1593
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1594
+ expected = gb.apply(npfunc)
1595
+ tm.assert_frame_equal(result, expected)
1596
+
1597
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1598
+ expected2 = gb.apply(lambda x: npfunc(x))
1599
+ tm.assert_frame_equal(result, expected2)
1600
+
1601
+ if f != sum:
1602
+ expected = gb.agg(fname).reset_index()
1603
+ expected.set_index(keys, inplace=True, drop=False)
1604
+ tm.assert_frame_equal(result, expected, check_dtype=False)
1605
+
1606
+ tm.assert_series_equal(getattr(result, fname)(axis=0), getattr(df, fname)(axis=0))
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_apply_mutate.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ import pandas as pd
4
+ import pandas._testing as tm
5
+
6
+
7
+ def test_group_by_copy():
8
+ # GH#44803
9
+ df = pd.DataFrame(
10
+ {
11
+ "name": ["Alice", "Bob", "Carl"],
12
+ "age": [20, 21, 20],
13
+ }
14
+ ).set_index("name")
15
+
16
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
17
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
18
+ grp_by_same_value = df.groupby(["age"], group_keys=False).apply(
19
+ lambda group: group
20
+ )
21
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
22
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
23
+ grp_by_copy = df.groupby(["age"], group_keys=False).apply(
24
+ lambda group: group.copy()
25
+ )
26
+ tm.assert_frame_equal(grp_by_same_value, grp_by_copy)
27
+
28
+
29
+ def test_mutate_groups():
30
+ # GH3380
31
+
32
+ df = pd.DataFrame(
33
+ {
34
+ "cat1": ["a"] * 8 + ["b"] * 6,
35
+ "cat2": ["c"] * 2
36
+ + ["d"] * 2
37
+ + ["e"] * 2
38
+ + ["f"] * 2
39
+ + ["c"] * 2
40
+ + ["d"] * 2
41
+ + ["e"] * 2,
42
+ "cat3": [f"g{x}" for x in range(1, 15)],
43
+ "val": np.random.default_rng(2).integers(100, size=14),
44
+ }
45
+ )
46
+
47
+ def f_copy(x):
48
+ x = x.copy()
49
+ x["rank"] = x.val.rank(method="min")
50
+ return x.groupby("cat2")["rank"].min()
51
+
52
+ def f_no_copy(x):
53
+ x["rank"] = x.val.rank(method="min")
54
+ return x.groupby("cat2")["rank"].min()
55
+
56
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
57
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
58
+ grpby_copy = df.groupby("cat1").apply(f_copy)
59
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
60
+ grpby_no_copy = df.groupby("cat1").apply(f_no_copy)
61
+ tm.assert_series_equal(grpby_copy, grpby_no_copy)
62
+
63
+
64
+ def test_no_mutate_but_looks_like():
65
+ # GH 8467
66
+ # first show's mutation indicator
67
+ # second does not, but should yield the same results
68
+ df = pd.DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
69
+
70
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
71
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
72
+ result1 = df.groupby("key", group_keys=True).apply(lambda x: x[:].key)
73
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
74
+ result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key)
75
+ tm.assert_series_equal(result1, result2)
76
+
77
+
78
+ def test_apply_function_with_indexing(warn_copy_on_write):
79
+ # GH: 33058
80
+ df = pd.DataFrame(
81
+ {"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]}
82
+ )
83
+
84
+ def fn(x):
85
+ x.loc[x.index[-1], "col2"] = 0
86
+ return x.col2
87
+
88
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
89
+ with tm.assert_produces_warning(
90
+ DeprecationWarning, match=msg, raise_on_extra_warnings=not warn_copy_on_write
91
+ ):
92
+ result = df.groupby(["col1"], as_index=False).apply(fn)
93
+ expected = pd.Series(
94
+ [1, 2, 0, 4, 5, 0],
95
+ index=pd.MultiIndex.from_tuples(
96
+ [(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)]
97
+ ),
98
+ name="col2",
99
+ )
100
+ tm.assert_series_equal(result, expected)
101
+
102
+
103
+ def test_apply_mutate_columns_multiindex():
104
+ # GH 12652
105
+ df = pd.DataFrame(
106
+ {
107
+ ("C", "julian"): [1, 2, 3],
108
+ ("B", "geoffrey"): [1, 2, 3],
109
+ ("A", "julian"): [1, 2, 3],
110
+ ("B", "julian"): [1, 2, 3],
111
+ ("A", "geoffrey"): [1, 2, 3],
112
+ ("C", "geoffrey"): [1, 2, 3],
113
+ },
114
+ columns=pd.MultiIndex.from_tuples(
115
+ [
116
+ ("A", "julian"),
117
+ ("A", "geoffrey"),
118
+ ("B", "julian"),
119
+ ("B", "geoffrey"),
120
+ ("C", "julian"),
121
+ ("C", "geoffrey"),
122
+ ]
123
+ ),
124
+ )
125
+
126
+ def add_column(grouped):
127
+ name = grouped.columns[0][1]
128
+ grouped["sum", name] = grouped.sum(axis=1)
129
+ return grouped
130
+
131
+ msg = "DataFrame.groupby with axis=1 is deprecated"
132
+ with tm.assert_produces_warning(FutureWarning, match=msg):
133
+ gb = df.groupby(level=1, axis=1)
134
+ result = gb.apply(add_column)
135
+ expected = pd.DataFrame(
136
+ [
137
+ [1, 1, 1, 3, 1, 1, 1, 3],
138
+ [2, 2, 2, 6, 2, 2, 2, 6],
139
+ [
140
+ 3,
141
+ 3,
142
+ 3,
143
+ 9,
144
+ 3,
145
+ 3,
146
+ 3,
147
+ 9,
148
+ ],
149
+ ],
150
+ columns=pd.MultiIndex.from_tuples(
151
+ [
152
+ ("geoffrey", "A", "geoffrey"),
153
+ ("geoffrey", "B", "geoffrey"),
154
+ ("geoffrey", "C", "geoffrey"),
155
+ ("geoffrey", "sum", "geoffrey"),
156
+ ("julian", "A", "julian"),
157
+ ("julian", "B", "julian"),
158
+ ("julian", "C", "julian"),
159
+ ("julian", "sum", "julian"),
160
+ ]
161
+ ),
162
+ )
163
+ tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_bin_groupby.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs import lib
5
+ import pandas.util._test_decorators as td
6
+
7
+ import pandas as pd
8
+ import pandas._testing as tm
9
+
10
+
11
+ def assert_block_lengths(x):
12
+ assert len(x) == len(x._mgr.blocks[0].mgr_locs)
13
+ return 0
14
+
15
+
16
+ def cumsum_max(x):
17
+ x.cumsum().max()
18
+ return 0
19
+
20
+
21
+ @pytest.mark.parametrize(
22
+ "func",
23
+ [
24
+ cumsum_max,
25
+ pytest.param(assert_block_lengths, marks=td.skip_array_manager_invalid_test),
26
+ ],
27
+ )
28
+ def test_mgr_locs_updated(func):
29
+ # https://github.com/pandas-dev/pandas/issues/31802
30
+ # Some operations may require creating new blocks, which requires
31
+ # valid mgr_locs
32
+ df = pd.DataFrame({"A": ["a", "a", "a"], "B": ["a", "b", "b"], "C": [1, 1, 1]})
33
+ result = df.groupby(["A", "B"]).agg(func)
34
+ expected = pd.DataFrame(
35
+ {"C": [0, 0]},
36
+ index=pd.MultiIndex.from_product([["a"], ["a", "b"]], names=["A", "B"]),
37
+ )
38
+ tm.assert_frame_equal(result, expected)
39
+
40
+
41
+ @pytest.mark.parametrize(
42
+ "binner,closed,expected",
43
+ [
44
+ (
45
+ np.array([0, 3, 6, 9], dtype=np.int64),
46
+ "left",
47
+ np.array([2, 5, 6], dtype=np.int64),
48
+ ),
49
+ (
50
+ np.array([0, 3, 6, 9], dtype=np.int64),
51
+ "right",
52
+ np.array([3, 6, 6], dtype=np.int64),
53
+ ),
54
+ (np.array([0, 3, 6], dtype=np.int64), "left", np.array([2, 5], dtype=np.int64)),
55
+ (
56
+ np.array([0, 3, 6], dtype=np.int64),
57
+ "right",
58
+ np.array([3, 6], dtype=np.int64),
59
+ ),
60
+ ],
61
+ )
62
+ def test_generate_bins(binner, closed, expected):
63
+ values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
64
+ result = lib.generate_bins_dt64(values, binner, closed=closed)
65
+ tm.assert_numpy_array_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_categorical.py ADDED
@@ -0,0 +1,2169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ Categorical,
9
+ CategoricalIndex,
10
+ DataFrame,
11
+ Index,
12
+ MultiIndex,
13
+ Series,
14
+ qcut,
15
+ )
16
+ import pandas._testing as tm
17
+ from pandas.api.typing import SeriesGroupBy
18
+ from pandas.tests.groupby import get_groupby_method_args
19
+
20
+
21
+ def cartesian_product_for_groupers(result, args, names, fill_value=np.nan):
22
+ """Reindex to a cartesian production for the groupers,
23
+ preserving the nature (Categorical) of each grouper
24
+ """
25
+
26
+ def f(a):
27
+ if isinstance(a, (CategoricalIndex, Categorical)):
28
+ categories = a.categories
29
+ a = Categorical.from_codes(
30
+ np.arange(len(categories)), categories=categories, ordered=a.ordered
31
+ )
32
+ return a
33
+
34
+ index = MultiIndex.from_product(map(f, args), names=names)
35
+ return result.reindex(index, fill_value=fill_value).sort_index()
36
+
37
+
38
+ _results_for_groupbys_with_missing_categories = {
39
+ # This maps the builtin groupby functions to their expected outputs for
40
+ # missing categories when they are called on a categorical grouper with
41
+ # observed=False. Some functions are expected to return NaN, some zero.
42
+ # These expected values can be used across several tests (i.e. they are
43
+ # the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
44
+ # hardcoded in one place.
45
+ "all": np.nan,
46
+ "any": np.nan,
47
+ "count": 0,
48
+ "corrwith": np.nan,
49
+ "first": np.nan,
50
+ "idxmax": np.nan,
51
+ "idxmin": np.nan,
52
+ "last": np.nan,
53
+ "max": np.nan,
54
+ "mean": np.nan,
55
+ "median": np.nan,
56
+ "min": np.nan,
57
+ "nth": np.nan,
58
+ "nunique": 0,
59
+ "prod": np.nan,
60
+ "quantile": np.nan,
61
+ "sem": np.nan,
62
+ "size": 0,
63
+ "skew": np.nan,
64
+ "std": np.nan,
65
+ "sum": 0,
66
+ "var": np.nan,
67
+ }
68
+
69
+
70
+ def test_apply_use_categorical_name(df):
71
+ cats = qcut(df.C, 4)
72
+
73
+ def get_stats(group):
74
+ return {
75
+ "min": group.min(),
76
+ "max": group.max(),
77
+ "count": group.count(),
78
+ "mean": group.mean(),
79
+ }
80
+
81
+ result = df.groupby(cats, observed=False).D.apply(get_stats)
82
+ assert result.index.names[0] == "C"
83
+
84
+
85
+ def test_basic(using_infer_string): # TODO: split this test
86
+ cats = Categorical(
87
+ ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
88
+ categories=["a", "b", "c", "d"],
89
+ ordered=True,
90
+ )
91
+ data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
92
+
93
+ exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
94
+ expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
95
+ result = data.groupby("b", observed=False).mean()
96
+ tm.assert_frame_equal(result, expected)
97
+
98
+ cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
99
+ cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
100
+ df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
101
+
102
+ # single grouper
103
+ gb = df.groupby("A", observed=False)
104
+ exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
105
+ expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
106
+ result = gb.sum(numeric_only=True)
107
+ tm.assert_frame_equal(result, expected)
108
+
109
+ # GH 8623
110
+ x = DataFrame(
111
+ [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. Doe"]],
112
+ columns=["person_id", "person_name"],
113
+ )
114
+ x["person_name"] = Categorical(x.person_name)
115
+
116
+ g = x.groupby(["person_id"], observed=False)
117
+ result = g.transform(lambda x: x)
118
+ tm.assert_frame_equal(result, x[["person_name"]])
119
+
120
+ result = x.drop_duplicates("person_name")
121
+ expected = x.iloc[[0, 1]]
122
+ tm.assert_frame_equal(result, expected)
123
+
124
+ def f(x):
125
+ return x.drop_duplicates("person_name").iloc[0]
126
+
127
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
128
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
129
+ result = g.apply(f)
130
+ expected = x.iloc[[0, 1]].copy()
131
+ expected.index = Index([1, 2], name="person_id")
132
+ dtype = "string[pyarrow_numpy]" if using_infer_string else object
133
+ expected["person_name"] = expected["person_name"].astype(dtype)
134
+ tm.assert_frame_equal(result, expected)
135
+
136
+ # GH 9921
137
+ # Monotonic
138
+ df = DataFrame({"a": [5, 15, 25]})
139
+ c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
140
+
141
+ msg = "using SeriesGroupBy.sum"
142
+ with tm.assert_produces_warning(FutureWarning, match=msg):
143
+ # GH#53425
144
+ result = df.a.groupby(c, observed=False).transform(sum)
145
+ tm.assert_series_equal(result, df["a"])
146
+
147
+ tm.assert_series_equal(
148
+ df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
149
+ )
150
+ msg = "using DataFrameGroupBy.sum"
151
+ with tm.assert_produces_warning(FutureWarning, match=msg):
152
+ # GH#53425
153
+ result = df.groupby(c, observed=False).transform(sum)
154
+ expected = df[["a"]]
155
+ tm.assert_frame_equal(result, expected)
156
+
157
+ gbc = df.groupby(c, observed=False)
158
+ result = gbc.transform(lambda xs: np.max(xs, axis=0))
159
+ tm.assert_frame_equal(result, df[["a"]])
160
+
161
+ result2 = gbc.transform(lambda xs: np.max(xs, axis=0))
162
+ msg = "using DataFrameGroupBy.max"
163
+ with tm.assert_produces_warning(FutureWarning, match=msg):
164
+ # GH#53425
165
+ result3 = gbc.transform(max)
166
+ result4 = gbc.transform(np.maximum.reduce)
167
+ result5 = gbc.transform(lambda xs: np.maximum.reduce(xs))
168
+ tm.assert_frame_equal(result2, df[["a"]], check_dtype=False)
169
+ tm.assert_frame_equal(result3, df[["a"]], check_dtype=False)
170
+ tm.assert_frame_equal(result4, df[["a"]])
171
+ tm.assert_frame_equal(result5, df[["a"]])
172
+
173
+ # Filter
174
+ tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
175
+ tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
176
+
177
+ # Non-monotonic
178
+ df = DataFrame({"a": [5, 15, 25, -5]})
179
+ c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
180
+
181
+ msg = "using SeriesGroupBy.sum"
182
+ with tm.assert_produces_warning(FutureWarning, match=msg):
183
+ # GH#53425
184
+ result = df.a.groupby(c, observed=False).transform(sum)
185
+ tm.assert_series_equal(result, df["a"])
186
+
187
+ tm.assert_series_equal(
188
+ df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
189
+ )
190
+ msg = "using DataFrameGroupBy.sum"
191
+ with tm.assert_produces_warning(FutureWarning, match=msg):
192
+ # GH#53425
193
+ result = df.groupby(c, observed=False).transform(sum)
194
+ expected = df[["a"]]
195
+ tm.assert_frame_equal(result, expected)
196
+
197
+ tm.assert_frame_equal(
198
+ df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
199
+ )
200
+
201
+ # GH 9603
202
+ df = DataFrame({"a": [1, 0, 0, 0]})
203
+ c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
204
+ result = df.groupby(c, observed=False).apply(len)
205
+
206
+ exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
207
+ expected = Series([1, 0, 0, 0], index=exp_index)
208
+ expected.index.name = "a"
209
+ tm.assert_series_equal(result, expected)
210
+
211
+ # more basic
212
+ levels = ["foo", "bar", "baz", "qux"]
213
+ codes = np.random.default_rng(2).integers(0, 4, size=100)
214
+
215
+ cats = Categorical.from_codes(codes, levels, ordered=True)
216
+
217
+ data = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))
218
+
219
+ result = data.groupby(cats, observed=False).mean()
220
+
221
+ expected = data.groupby(np.asarray(cats), observed=False).mean()
222
+ exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
223
+ expected = expected.reindex(exp_idx)
224
+
225
+ tm.assert_frame_equal(result, expected)
226
+
227
+ grouped = data.groupby(cats, observed=False)
228
+ desc_result = grouped.describe()
229
+
230
+ idx = cats.codes.argsort()
231
+ ord_labels = np.asarray(cats).take(idx)
232
+ ord_data = data.take(idx)
233
+
234
+ exp_cats = Categorical(
235
+ ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
236
+ )
237
+ expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
238
+ tm.assert_frame_equal(desc_result, expected)
239
+
240
+ # GH 10460
241
+ expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
242
+ exp = CategoricalIndex(expc)
243
+ tm.assert_index_equal(
244
+ (desc_result.stack(future_stack=True).index.get_level_values(0)), exp
245
+ )
246
+ exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
247
+ tm.assert_index_equal(
248
+ (desc_result.stack(future_stack=True).index.get_level_values(1)), exp
249
+ )
250
+
251
+
252
+ def test_level_get_group(observed):
253
+ # GH15155
254
+ df = DataFrame(
255
+ data=np.arange(2, 22, 2),
256
+ index=MultiIndex(
257
+ levels=[CategoricalIndex(["a", "b"]), range(10)],
258
+ codes=[[0] * 5 + [1] * 5, range(10)],
259
+ names=["Index1", "Index2"],
260
+ ),
261
+ )
262
+ g = df.groupby(level=["Index1"], observed=observed)
263
+
264
+ # expected should equal test.loc[["a"]]
265
+ # GH15166
266
+ expected = DataFrame(
267
+ data=np.arange(2, 12, 2),
268
+ index=MultiIndex(
269
+ levels=[CategoricalIndex(["a", "b"]), range(5)],
270
+ codes=[[0] * 5, range(5)],
271
+ names=["Index1", "Index2"],
272
+ ),
273
+ )
274
+ msg = "you will need to pass a length-1 tuple"
275
+ with tm.assert_produces_warning(FutureWarning, match=msg):
276
+ # GH#25971 - warn when not passing a length-1 tuple
277
+ result = g.get_group("a")
278
+
279
+ tm.assert_frame_equal(result, expected)
280
+
281
+
282
+ def test_sorting_with_different_categoricals():
283
+ # GH 24271
284
+ df = DataFrame(
285
+ {
286
+ "group": ["A"] * 6 + ["B"] * 6,
287
+ "dose": ["high", "med", "low"] * 4,
288
+ "outcomes": np.arange(12.0),
289
+ }
290
+ )
291
+
292
+ df.dose = Categorical(df.dose, categories=["low", "med", "high"], ordered=True)
293
+
294
+ result = df.groupby("group")["dose"].value_counts()
295
+ result = result.sort_index(level=0, sort_remaining=True)
296
+ index = ["low", "med", "high", "low", "med", "high"]
297
+ index = Categorical(index, categories=["low", "med", "high"], ordered=True)
298
+ index = [["A", "A", "A", "B", "B", "B"], CategoricalIndex(index)]
299
+ index = MultiIndex.from_arrays(index, names=["group", "dose"])
300
+ expected = Series([2] * 6, index=index, name="count")
301
+ tm.assert_series_equal(result, expected)
302
+
303
+
304
+ @pytest.mark.parametrize("ordered", [True, False])
305
+ def test_apply(ordered):
306
+ # GH 10138
307
+
308
+ dense = Categorical(list("abc"), ordered=ordered)
309
+
310
+ # 'b' is in the categories but not in the list
311
+ missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
312
+ values = np.arange(len(dense))
313
+ df = DataFrame({"missing": missing, "dense": dense, "values": values})
314
+ grouped = df.groupby(["missing", "dense"], observed=True)
315
+
316
+ # missing category 'b' should still exist in the output index
317
+ idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
318
+ expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
319
+
320
+ result = grouped.apply(lambda x: np.mean(x, axis=0))
321
+ tm.assert_frame_equal(result, expected)
322
+
323
+ result = grouped.mean()
324
+ tm.assert_frame_equal(result, expected)
325
+
326
+ msg = "using DataFrameGroupBy.mean"
327
+ with tm.assert_produces_warning(FutureWarning, match=msg):
328
+ # GH#53425
329
+ result = grouped.agg(np.mean)
330
+ tm.assert_frame_equal(result, expected)
331
+
332
+ # but for transform we should still get back the original index
333
+ idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
334
+ expected = Series(1, index=idx)
335
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
336
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
337
+ result = grouped.apply(lambda x: 1)
338
+ tm.assert_series_equal(result, expected)
339
+
340
+
341
+ def test_observed(observed):
342
+ # multiple groupers, don't re-expand the output space
343
+ # of the grouper
344
+ # gh-14942 (implement)
345
+ # gh-10132 (back-compat)
346
+ # gh-8138 (back-compat)
347
+ # gh-8869
348
+
349
+ cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
350
+ cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
351
+ df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
352
+ df["C"] = ["foo", "bar"] * 2
353
+
354
+ # multiple groupers with a non-cat
355
+ gb = df.groupby(["A", "B", "C"], observed=observed)
356
+ exp_index = MultiIndex.from_arrays(
357
+ [cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
358
+ )
359
+ expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
360
+ result = gb.sum()
361
+ if not observed:
362
+ expected = cartesian_product_for_groupers(
363
+ expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
364
+ )
365
+
366
+ tm.assert_frame_equal(result, expected)
367
+
368
+ gb = df.groupby(["A", "B"], observed=observed)
369
+ exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
370
+ expected = DataFrame(
371
+ {"values": [1, 2, 3, 4], "C": ["foo", "bar", "foo", "bar"]}, index=exp_index
372
+ )
373
+ result = gb.sum()
374
+ if not observed:
375
+ expected = cartesian_product_for_groupers(
376
+ expected, [cat1, cat2], list("AB"), fill_value=0
377
+ )
378
+
379
+ tm.assert_frame_equal(result, expected)
380
+
381
+ # https://github.com/pandas-dev/pandas/issues/8138
382
+ d = {
383
+ "cat": Categorical(
384
+ ["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
385
+ ),
386
+ "ints": [1, 1, 2, 2],
387
+ "val": [10, 20, 30, 40],
388
+ }
389
+ df = DataFrame(d)
390
+
391
+ # Grouping on a single column
392
+ groups_single_key = df.groupby("cat", observed=observed)
393
+ result = groups_single_key.mean()
394
+
395
+ exp_index = CategoricalIndex(
396
+ list("ab"), name="cat", categories=list("abc"), ordered=True
397
+ )
398
+ expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
399
+ if not observed:
400
+ index = CategoricalIndex(
401
+ list("abc"), name="cat", categories=list("abc"), ordered=True
402
+ )
403
+ expected = expected.reindex(index)
404
+
405
+ tm.assert_frame_equal(result, expected)
406
+
407
+ # Grouping on two columns
408
+ groups_double_key = df.groupby(["cat", "ints"], observed=observed)
409
+ result = groups_double_key.agg("mean")
410
+ expected = DataFrame(
411
+ {
412
+ "val": [10.0, 30.0, 20.0, 40.0],
413
+ "cat": Categorical(
414
+ ["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
415
+ ),
416
+ "ints": [1, 2, 1, 2],
417
+ }
418
+ ).set_index(["cat", "ints"])
419
+ if not observed:
420
+ expected = cartesian_product_for_groupers(
421
+ expected, [df.cat.values, [1, 2]], ["cat", "ints"]
422
+ )
423
+
424
+ tm.assert_frame_equal(result, expected)
425
+
426
+ # GH 10132
427
+ for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
428
+ c, i = key
429
+ result = groups_double_key.get_group(key)
430
+ expected = df[(df.cat == c) & (df.ints == i)]
431
+ tm.assert_frame_equal(result, expected)
432
+
433
+ # gh-8869
434
+ # with as_index
435
+ d = {
436
+ "foo": [10, 8, 4, 8, 4, 1, 1],
437
+ "bar": [10, 20, 30, 40, 50, 60, 70],
438
+ "baz": ["d", "c", "e", "a", "a", "d", "c"],
439
+ }
440
+ df = DataFrame(d)
441
+ cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
442
+ df["range"] = cat
443
+ groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
444
+ result = groups.agg("mean")
445
+
446
+ groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
447
+ expected = groups2.agg("mean").reset_index()
448
+ tm.assert_frame_equal(result, expected)
449
+
450
+
451
+ def test_observed_codes_remap(observed):
452
+ d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
453
+ df = DataFrame(d)
454
+ values = pd.cut(df["C1"], [1, 2, 3, 6])
455
+ values.name = "cat"
456
+ groups_double_key = df.groupby([values, "C2"], observed=observed)
457
+
458
+ idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
459
+ expected = DataFrame(
460
+ {"C1": [3.0, 3.0, 4.0, 5.0], "C3": [10.0, 100.0, 200.0, 34.0]}, index=idx
461
+ )
462
+ if not observed:
463
+ expected = cartesian_product_for_groupers(
464
+ expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
465
+ )
466
+
467
+ result = groups_double_key.agg("mean")
468
+ tm.assert_frame_equal(result, expected)
469
+
470
+
471
+ def test_observed_perf():
472
+ # we create a cartesian product, so this is
473
+ # non-performant if we don't use observed values
474
+ # gh-14942
475
+ df = DataFrame(
476
+ {
477
+ "cat": np.random.default_rng(2).integers(0, 255, size=30000),
478
+ "int_id": np.random.default_rng(2).integers(0, 255, size=30000),
479
+ "other_id": np.random.default_rng(2).integers(0, 10000, size=30000),
480
+ "foo": 0,
481
+ }
482
+ )
483
+ df["cat"] = df.cat.astype(str).astype("category")
484
+
485
+ grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
486
+ result = grouped.count()
487
+ assert result.index.levels[0].nunique() == df.cat.nunique()
488
+ assert result.index.levels[1].nunique() == df.int_id.nunique()
489
+ assert result.index.levels[2].nunique() == df.other_id.nunique()
490
+
491
+
492
+ def test_observed_groups(observed):
493
+ # gh-20583
494
+ # test that we have the appropriate groups
495
+
496
+ cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
497
+ df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
498
+ g = df.groupby("cat", observed=observed)
499
+
500
+ result = g.groups
501
+ if observed:
502
+ expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
503
+ else:
504
+ expected = {
505
+ "a": Index([0, 2], dtype="int64"),
506
+ "b": Index([], dtype="int64"),
507
+ "c": Index([1], dtype="int64"),
508
+ }
509
+
510
+ tm.assert_dict_equal(result, expected)
511
+
512
+
513
+ @pytest.mark.parametrize(
514
+ "keys, expected_values, expected_index_levels",
515
+ [
516
+ ("a", [15, 9, 0], CategoricalIndex([1, 2, 3], name="a")),
517
+ (
518
+ ["a", "b"],
519
+ [7, 8, 0, 0, 0, 9, 0, 0, 0],
520
+ [CategoricalIndex([1, 2, 3], name="a"), Index([4, 5, 6])],
521
+ ),
522
+ (
523
+ ["a", "a2"],
524
+ [15, 0, 0, 0, 9, 0, 0, 0, 0],
525
+ [
526
+ CategoricalIndex([1, 2, 3], name="a"),
527
+ CategoricalIndex([1, 2, 3], name="a"),
528
+ ],
529
+ ),
530
+ ],
531
+ )
532
+ @pytest.mark.parametrize("test_series", [True, False])
533
+ def test_unobserved_in_index(keys, expected_values, expected_index_levels, test_series):
534
+ # GH#49354 - ensure unobserved cats occur when grouping by index levels
535
+ df = DataFrame(
536
+ {
537
+ "a": Categorical([1, 1, 2], categories=[1, 2, 3]),
538
+ "a2": Categorical([1, 1, 2], categories=[1, 2, 3]),
539
+ "b": [4, 5, 6],
540
+ "c": [7, 8, 9],
541
+ }
542
+ ).set_index(["a", "a2"])
543
+ if "b" not in keys:
544
+ # Only keep b when it is used for grouping for consistent columns in the result
545
+ df = df.drop(columns="b")
546
+
547
+ gb = df.groupby(keys, observed=False)
548
+ if test_series:
549
+ gb = gb["c"]
550
+ result = gb.sum()
551
+
552
+ if len(keys) == 1:
553
+ index = expected_index_levels
554
+ else:
555
+ codes = [[0, 0, 0, 1, 1, 1, 2, 2, 2], 3 * [0, 1, 2]]
556
+ index = MultiIndex(
557
+ expected_index_levels,
558
+ codes=codes,
559
+ names=keys,
560
+ )
561
+ expected = DataFrame({"c": expected_values}, index=index)
562
+ if test_series:
563
+ expected = expected["c"]
564
+ tm.assert_equal(result, expected)
565
+
566
+
567
+ def test_observed_groups_with_nan(observed):
568
+ # GH 24740
569
+ df = DataFrame(
570
+ {
571
+ "cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
572
+ "vals": [1, 2, 3],
573
+ }
574
+ )
575
+ g = df.groupby("cat", observed=observed)
576
+ result = g.groups
577
+ if observed:
578
+ expected = {"a": Index([0, 2], dtype="int64")}
579
+ else:
580
+ expected = {
581
+ "a": Index([0, 2], dtype="int64"),
582
+ "b": Index([], dtype="int64"),
583
+ "d": Index([], dtype="int64"),
584
+ }
585
+ tm.assert_dict_equal(result, expected)
586
+
587
+
588
def test_observed_nth():
    # GH 26385: nth() with observed=False must not invent rows for
    # unobserved categories
    cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
    df = DataFrame({"cat": cat, "ser": Series([1, 2, 3])})

    result = df.groupby("cat", observed=False)["ser"].nth(0)
    tm.assert_series_equal(result, df["ser"].iloc[[0]])
597
+
598
+
599
+ def test_dataframe_categorical_with_nan(observed):
600
+ # GH 21151
601
+ s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
602
+ s2 = Series([1, 2, 3, 4])
603
+ df = DataFrame({"s1": s1, "s2": s2})
604
+ result = df.groupby("s1", observed=observed).first().reset_index()
605
+ if observed:
606
+ expected = DataFrame(
607
+ {"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
608
+ )
609
+ else:
610
+ expected = DataFrame(
611
+ {
612
+ "s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
613
+ "s2": [2, np.nan, np.nan],
614
+ }
615
+ )
616
+ tm.assert_frame_equal(result, expected)
617
+
618
+
619
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
    # GH 25871: Fix groupby sorting on ordered Categoricals
    # GH 25167: Groupby with observed=True doesn't sort
    # One category ('missing') never occurs; the value column mirrors the
    # labels, so index labels must line up with the aggregated values.
    values = ["d", "a", "b", "a", "d", "b"]
    label = Categorical(
        values, categories=["a", "b", "missing", "d"], ordered=ordered
    )
    df = DataFrame({"label": label, "val": Series(values)})

    result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")

    # With observed=False the unobserved label aggregates to None, so map it
    # back to 'missing' before comparing labels against aggregations.
    label = Series(result.index.array, dtype="object")
    aggr = Series(result.array)
    if not observed:
        aggr[aggr.isna()] = "missing"
    if not all(label == aggr):
        msg = (
            "Labels and aggregation results not consistently sorted\n"
            f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
            f"Result:\n{result}"
        )
        assert False, msg
652
+
653
+
654
def test_datetime():
    # GH9049: ensure backward compatibility with datetime-valued categories
    levels = pd.date_range("2014-01-01", periods=4)
    codes = np.random.default_rng(2).integers(0, 4, size=100)
    cats = Categorical.from_codes(codes, levels, ordered=True)
    data = DataFrame(np.random.default_rng(2).standard_normal((100, 4)))

    result = data.groupby(cats, observed=False).mean()
    expected = data.groupby(np.asarray(cats), observed=False).mean().reindex(levels)
    expected.index = CategoricalIndex(
        expected.index, categories=expected.index, ordered=True
    )
    tm.assert_frame_equal(result, expected)

    grouped = data.groupby(cats, observed=False)
    desc_result = grouped.describe()

    # describe() must agree with describing the data reordered by category code
    order = cats.codes.argsort()
    expected = data.take(order).groupby(cats.take(order), observed=False).describe()
    tm.assert_frame_equal(desc_result, expected)
    tm.assert_index_equal(desc_result.index, expected.index)
    tm.assert_index_equal(
        desc_result.index.get_level_values(0), expected.index.get_level_values(0)
    )

    # GH 10460: stacking keeps the categorical level and the stat names
    expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
    tm.assert_index_equal(
        desc_result.stack(future_stack=True).index.get_level_values(0),
        CategoricalIndex(expc),
    )
    exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
    tm.assert_index_equal(
        desc_result.stack(future_stack=True).index.get_level_values(1), exp
    )
695
+
696
+
697
def test_categorical_index():
    rng = np.random.default_rng(2)
    levels = ["foo", "bar", "baz", "qux"]
    cats = Categorical.from_codes(rng.integers(0, 4, size=20), levels, ordered=True)
    df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
    df["cats"] = cats

    # grouping by codes and relabelling gives the reference result for both paths
    expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
    expected.index = CategoricalIndex(
        Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
    )

    # with a cat index
    result = df.set_index("cats").groupby(level=0, observed=False).sum()
    tm.assert_frame_equal(result, expected)

    # with a cat column, should produce a cat index
    result = df.groupby("cats", observed=False).sum()
    tm.assert_frame_equal(result, expected)
720
+
721
+
722
def test_describe_categorical_columns():
    # GH 11558: describe() keeps categorical column metadata intact
    cats = CategoricalIndex(
        ["qux", "foo", "baz", "bar"],
        categories=["foo", "bar", "baz", "qux"],
        ordered=True,
    )
    df = DataFrame(np.random.default_rng(2).standard_normal((20, 4)), columns=cats)
    result = df.groupby([1, 2, 3, 4] * 5).describe()

    stacked = result.stack(future_stack=True)
    tm.assert_index_equal(stacked.columns, cats)
    tm.assert_categorical_equal(stacked.columns.values, cats.values)
736
+
737
+
738
def test_unstack_categorical():
    # GH11558 (example is taken from the original issue)
    df = DataFrame(
        {"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
    )
    df["medium"] = df["medium"].astype("category")

    gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()

    # describe() on the unstacked frame keeps the categorical columns
    exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
    described = gcat.describe()
    tm.assert_index_equal(described.columns, exp_columns)
    tm.assert_categorical_equal(described.columns.values, exp_columns.values)

    # column arithmetic still works on the categorical-labelled columns
    tm.assert_series_equal(
        gcat["A"] + gcat["B"],
        Series([6, 4], index=Index(["X", "Y"], name="artist")),
    )
755
+
756
+
757
def test_bins_unequal_len():
    # GH3011: a grouper shorter than the axis must raise, not silently align
    series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
    bins = pd.cut(series.dropna().values, 4)

    # len(bins) != len(series) here
    with pytest.raises(ValueError, match="Grouper and axis must be same length"):
        series.groupby(bins).mean()
765
+
766
+
767
@pytest.mark.parametrize(
    ["series", "data"],
    [
        # Group a series with length and index equal to those of the grouper.
        (Series(range(4)), {"A": [0, 3], "B": [1, 2]}),
        # Group a series with length equal to that of the grouper and index unequal to
        # that of the grouper.
        (Series(range(4)).rename(lambda idx: idx + 1), {"A": [2], "B": [0, 1]}),
        # GH44179: Group a series with length unequal to that of the grouper.
        (Series(range(7)), {"A": [0, 3], "B": [1, 2]}),
    ],
)
def test_categorical_series(series, data):
    # Group by a categorical ABBA pattern: group A owns indices 0/3 and
    # group B indices 1/2, yielding the index lists recorded in `data`.
    grouper = Series(list("ABBA"), dtype="category")
    result = series.groupby(grouper, observed=False).aggregate(list)
    tm.assert_series_equal(result, Series(data, index=CategoricalIndex(data.keys())))
787
+
788
+
789
def test_as_index():
    # GH13204: as_index=False with categorical groupers
    df = DataFrame(
        {
            "cat": Categorical([1, 2, 2], [1, 2, 3]),
            "A": [10, 11, 11],
            "B": [101, 102, 103],
        }
    )
    result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
    expected = DataFrame(
        {
            "cat": Categorical([1, 2], categories=df.cat.cat.categories),
            "A": [10, 11],
            "B": [101, 205],
        },
        columns=["cat", "A", "B"],
    )
    tm.assert_frame_equal(result, expected)

    # function grouper
    f = lambda r: df.loc[r, "A"]
    msg = "A grouping .* was excluded from the result"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(["cat", f], as_index=False, observed=True).sum()
    expected = DataFrame(
        {
            "cat": Categorical([1, 2], categories=df.cat.cat.categories),
            "A": [10, 22],
            "B": [101, 205],
        },
        columns=["cat", "A", "B"],
    )
    tm.assert_frame_equal(result, expected)

    # another not in-axis grouper (conflicting names in index)
    s = Series(["a", "b", "b"], name="cat")
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(["cat", s], as_index=False, observed=True).sum()
    tm.assert_frame_equal(result, expected)

    # the original index is dropped regardless of its name
    expected = DataFrame(
        {
            "cat": Categorical([1, 2], categories=df.cat.cat.categories),
            "A": [10, 11],
            "B": [101, 205],
        },
        columns=["cat", "A", "B"],
    )
    for name in [None, "X", "B"]:
        df.index = Index(list("abc"), name=name)
        result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
        tm.assert_frame_equal(result, expected)
847
+
848
+
849
def test_preserve_categories():
    # GH-13179: groupby keeps the grouper's categories
    # GH#48749 - don't change order of categories
    # GH#42482 - don't sort result when sort=False, even when ordered=True
    categories = list("abc")
    for ordered in (True, False):
        df = DataFrame(
            {"A": Categorical(list("ba"), categories=categories, ordered=ordered)}
        )
        sort_index = CategoricalIndex(
            categories, categories, ordered=ordered, name="A"
        )
        # sort=False keeps first-appearance order ('b' before 'a')
        nosort_index = CategoricalIndex(
            list("bac"), categories, ordered=ordered, name="A"
        )
        tm.assert_index_equal(
            df.groupby("A", sort=True, observed=False).first().index, sort_index
        )
        tm.assert_index_equal(
            df.groupby("A", sort=False, observed=False).first().index, nosort_index
        )
877
+
878
+
879
def test_preserve_categorical_dtype():
    # GH13743, GH13854: mean() keeps the categorical columns' dtype whether
    # the grouper is returned as index or as column
    df = DataFrame(
        {
            "A": [1, 2, 1, 1, 2],
            "B": [10, 16, 22, 28, 34],
            "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
            "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
        }
    )
    exp_full = DataFrame(
        {
            "A": [2.0, 1.0, np.nan],
            "B": [25.0, 20.0, np.nan],
            "C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
            "C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
        }
    )
    for col in ["C1", "C2"]:
        as_column = df.groupby(by=col, as_index=False, observed=False).mean(
            numeric_only=True
        )
        as_index = (
            df.groupby(by=col, as_index=True, observed=False)
            .mean(numeric_only=True)
            .reset_index()
        )
        expected = exp_full.reindex(columns=as_column.columns)
        tm.assert_frame_equal(as_column, expected)
        tm.assert_frame_equal(as_index, expected)
910
+
911
+
912
@pytest.mark.parametrize(
    "func, values",
    [
        ("first", ["second", "first"]),
        ("last", ["fourth", "third"]),
        ("min", ["fourth", "first"]),
        ("max", ["second", "third"]),
    ],
)
def test_preserve_on_ordered_ops(func, values):
    # gh-18502: first/last/min/max preserve the ordered categorical dtype
    c = Categorical(["first", "second", "third", "fourth"], ordered=True)
    df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})

    expected = DataFrame(
        {"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
    ).set_index("payload")
    result = getattr(df.groupby("payload"), func)()
    tm.assert_frame_equal(result, expected)

    # the SeriesGroupBy path must preserve the dtype as well
    result = getattr(df.groupby("payload")["col"], func)()
    tm.assert_series_equal(result, expected["col"])
938
+
939
+
940
def test_categorical_no_compress():
    data = Series(np.random.default_rng(2).standard_normal(9))

    # fully observed categories: result matches grouping by the raw codes
    codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
    cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
    result = data.groupby(cats, observed=False).mean()
    exp = data.groupby(codes, observed=False).mean()
    exp.index = CategoricalIndex(
        exp.index, categories=cats.categories, ordered=cats.ordered
    )
    tm.assert_series_equal(result, exp)

    # category 2 is unobserved: reindexing inserts it as NaN
    codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
    cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
    result = data.groupby(cats, observed=False).mean()
    exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
    exp.index = CategoricalIndex(
        exp.index, categories=cats.categories, ordered=cats.ordered
    )
    tm.assert_series_equal(result, exp)

    # same idea via a categorical column: unobserved 'd' becomes NaN
    cats = Categorical(
        ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
        categories=["a", "b", "c", "d"],
        ordered=True,
    )
    data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
    result = data.groupby("b", observed=False).mean()["a"].values
    tm.assert_numpy_array_equal(result, np.array([1, 2, 4, np.nan]))
975
+
976
+
977
def test_groupby_empty_with_category():
    # GH-9614: grouping on an all-None key must not coerce a categorical
    # column to float
    frame = DataFrame(
        {"A": [None] * 3, "B": Categorical(["train", "train", "test"])}
    )
    result = frame.groupby("A").first()["B"]
    expected = Series(
        Categorical([], categories=["test", "train"]),
        index=Series([], dtype="object", name="A"),
        name="B",
    )
    tm.assert_series_equal(result, expected)
989
+
990
+
991
def test_sort():
    # https://stackoverflow.com/questions/23814368/sorting-pandas-
    # categorical-labels-after-groupby
    # Counts per bin should come back ordered by the categorical bin labels
    # so a bar plot gets a sorted x axis.
    df = DataFrame({"value": np.random.default_rng(2).integers(0, 10000, 100)})
    labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
    cat_labels = Categorical(labels, labels)

    df = df.sort_values(by=["value"], ascending=True)
    df["value_group"] = pd.cut(
        df.value, range(0, 10500, 500), right=False, labels=cat_labels
    )

    result = df.groupby(["value_group"], observed=False)["value_group"].count()
    # reorder by the numeric lower bound of each label; must be a no-op
    expected = result[sorted(result.index, key=lambda x: float(x.split()[0]))]
    expected.index = CategoricalIndex(expected.index, name=expected.index.name)
    tm.assert_series_equal(result, expected)
1011
+
1012
+
1013
+ @pytest.mark.parametrize("ordered", [True, False])
1014
+ def test_sort2(sort, ordered):
1015
+ # dataframe groupby sort was being ignored # GH 8868
1016
+ # GH#48749 - don't change order of categories
1017
+ # GH#42482 - don't sort result when sort=False, even when ordered=True
1018
+ df = DataFrame(
1019
+ [
1020
+ ["(7.5, 10]", 10, 10],
1021
+ ["(7.5, 10]", 8, 20],
1022
+ ["(2.5, 5]", 5, 30],
1023
+ ["(5, 7.5]", 6, 40],
1024
+ ["(2.5, 5]", 4, 50],
1025
+ ["(0, 2.5]", 1, 60],
1026
+ ["(5, 7.5]", 7, 70],
1027
+ ],
1028
+ columns=["range", "foo", "bar"],
1029
+ )
1030
+ df["range"] = Categorical(df["range"], ordered=ordered)
1031
+ result = df.groupby("range", sort=sort, observed=False).first()
1032
+
1033
+ if sort:
1034
+ data_values = [[1, 60], [5, 30], [6, 40], [10, 10]]
1035
+ index_values = ["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"]
1036
+ else:
1037
+ data_values = [[10, 10], [5, 30], [6, 40], [1, 60]]
1038
+ index_values = ["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"]
1039
+ expected = DataFrame(
1040
+ data_values,
1041
+ columns=["foo", "bar"],
1042
+ index=CategoricalIndex(index_values, name="range", ordered=ordered),
1043
+ )
1044
+
1045
+ tm.assert_frame_equal(result, expected)
1046
+
1047
+
1048
+ @pytest.mark.parametrize("ordered", [True, False])
1049
+ def test_sort_datetimelike(sort, ordered):
1050
+ # GH10505
1051
+ # GH#42482 - don't sort result when sort=False, even when ordered=True
1052
+
1053
+ # use same data as test_groupby_sort_categorical, which category is
1054
+ # corresponding to datetime.month
1055
+ df = DataFrame(
1056
+ {
1057
+ "dt": [
1058
+ datetime(2011, 7, 1),
1059
+ datetime(2011, 7, 1),
1060
+ datetime(2011, 2, 1),
1061
+ datetime(2011, 5, 1),
1062
+ datetime(2011, 2, 1),
1063
+ datetime(2011, 1, 1),
1064
+ datetime(2011, 5, 1),
1065
+ ],
1066
+ "foo": [10, 8, 5, 6, 4, 1, 7],
1067
+ "bar": [10, 20, 30, 40, 50, 60, 70],
1068
+ },
1069
+ columns=["dt", "foo", "bar"],
1070
+ )
1071
+
1072
+ # ordered=True
1073
+ df["dt"] = Categorical(df["dt"], ordered=ordered)
1074
+ if sort:
1075
+ data_values = [[1, 60], [5, 30], [6, 40], [10, 10]]
1076
+ index_values = [
1077
+ datetime(2011, 1, 1),
1078
+ datetime(2011, 2, 1),
1079
+ datetime(2011, 5, 1),
1080
+ datetime(2011, 7, 1),
1081
+ ]
1082
+ else:
1083
+ data_values = [[10, 10], [5, 30], [6, 40], [1, 60]]
1084
+ index_values = [
1085
+ datetime(2011, 7, 1),
1086
+ datetime(2011, 2, 1),
1087
+ datetime(2011, 5, 1),
1088
+ datetime(2011, 1, 1),
1089
+ ]
1090
+ expected = DataFrame(
1091
+ data_values,
1092
+ columns=["foo", "bar"],
1093
+ index=CategoricalIndex(index_values, name="dt", ordered=ordered),
1094
+ )
1095
+ result = df.groupby("dt", sort=sort, observed=False).first()
1096
+ tm.assert_frame_equal(result, expected)
1097
+
1098
+
1099
def test_empty_sum():
    # https://github.com/pandas-dev/pandas/issues/18678
    df = DataFrame(
        {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
    )
    expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
    gb = df.groupby("A", observed=False).B

    # 0 by default
    tm.assert_series_equal(gb.sum(), Series([3, 1, 0], expected_idx, name="B"))

    # min_count=0
    tm.assert_series_equal(
        gb.sum(min_count=0), Series([3, 1, 0], expected_idx, name="B")
    )

    # min_count=1: the empty "c" group becomes NaN
    tm.assert_series_equal(
        gb.sum(min_count=1), Series([3, 1, np.nan], expected_idx, name="B")
    )

    # min_count>1: the single-element "b" group drops out as well
    tm.assert_series_equal(
        gb.sum(min_count=2), Series([3, np.nan, np.nan], expected_idx, name="B")
    )
1125
+
1126
+
1127
def test_empty_prod():
    # https://github.com/pandas-dev/pandas/issues/18678
    df = DataFrame(
        {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
    )
    expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
    gb = df.groupby("A", observed=False).B

    # 1 by default
    tm.assert_series_equal(gb.prod(), Series([2, 1, 1], expected_idx, name="B"))

    # min_count=0
    tm.assert_series_equal(
        gb.prod(min_count=0), Series([2, 1, 1], expected_idx, name="B")
    )

    # min_count=1: the empty "c" group becomes NaN
    tm.assert_series_equal(
        gb.prod(min_count=1), Series([2, 1, np.nan], expected_idx, name="B")
    )
1149
+
1150
+
1151
def test_groupby_multiindex_categorical_datetime():
    # https://github.com/pandas-dev/pandas/issues/21390
    df = DataFrame(
        {
            "key1": Categorical(list("abcbabcba")),
            "key2": Categorical(
                list(pd.date_range("2018-06-01 00", freq="1min", periods=3)) * 3
            ),
            "values": np.arange(9),
        }
    )
    result = df.groupby(["key1", "key2"], observed=False).mean()

    # full cartesian product of both category sets, with NaN for the
    # unobserved ('b', third timestamp) combination
    idx = MultiIndex.from_product(
        [
            Categorical(["a", "b", "c"]),
            Categorical(pd.date_range("2018-06-01 00", freq="1min", periods=3)),
        ],
        names=["key1", "key2"],
    )
    expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
    tm.assert_frame_equal(result, expected)
1174
+
1175
+
1176
@pytest.mark.parametrize(
    "as_index, expected",
    [
        (
            True,
            Series(
                index=MultiIndex.from_arrays(
                    [Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
                ),
                data=[1, 2, 3],
                name="x",
            ),
        ),
        (
            False,
            DataFrame(
                {
                    "a": Series([1, 1, 2], dtype="category"),
                    "b": [1, 2, 2],
                    "x": [1, 2, 3],
                }
            ),
        ),
    ],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
    # GH-23970: observed=True with a selected column keeps only the observed
    # category combinations
    df = DataFrame(
        {"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
    )
    result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
    tm.assert_equal(result, expected)
1210
+
1211
+
1212
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
    # every missing-value sentinel fills a shifted Categorical identically
    ct = Categorical(
        ["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
    )
    expected = Categorical(
        [None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
    )
    tm.assert_equal(ct.shift(1, fill_value=fill_value), expected)
1222
+
1223
+
1224
@pytest.fixture
def df_cat(df):
    """
    DataFrame with multiple categorical columns and a column of integers.
    Shortened so as not to contain all possible combinations of categories.
    Useful for testing `observed` kwarg functionality on GroupBy objects.

    Parameters
    ----------
    df: DataFrame
        Non-categorical, longer DataFrame from another fixture, used to derive
        this one

    Returns
    -------
    df_cat: DataFrame
    """
    # keep only the first four rows so some category combinations stay unobserved
    out = df.copy()[:4]
    out["A"] = out["A"].astype("category")
    out["B"] = out["B"].astype("category")
    out["C"] = Series([1, 2, 3, 4])
    return out.drop(["D"], axis=1)
1247
+
1248
+
1249
@pytest.mark.parametrize("operation", ["agg", "apply"])
def test_seriesgroupby_observed_true(df_cat, operation):
    # GH#24880
    # GH#49223 - order of results was wrong when grouping by index levels
    lev_a = Index(["bar", "bar", "foo", "foo"], dtype=df_cat["A"].dtype, name="A")
    lev_b = Index(["one", "three", "one", "two"], dtype=df_cat["B"].dtype, name="B")
    expected = Series(
        data=[2, 4, 1, 3], index=MultiIndex.from_arrays([lev_a, lev_b]), name="C"
    ).sort_index()

    grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
    # GH#53425: passing the builtin sum is deprecated
    msg = "using np.sum" if operation == "apply" else "using SeriesGroupBy.sum"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = getattr(grouped, operation)(sum)
    tm.assert_series_equal(result, expected)
1264
+
1265
+
1266
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
    # GH 24880
    # GH#49223 - order of results was wrong when grouping by index levels
    index, _ = MultiIndex.from_product(
        [
            CategoricalIndex(["bar", "foo"], ordered=False),
            CategoricalIndex(["one", "three", "two"], ordered=False),
        ],
        names=["A", "B"],
    ).sortlevel()

    expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
    if operation == "agg":
        # agg fills unobserved combinations with 0 instead of NaN
        msg = "The 'downcast' keyword in fillna is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            expected = expected.fillna(0, downcast="infer")
    grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
    # GH#53425: passing the builtin sum is deprecated
    msg = "using SeriesGroupBy.sum" if operation == "agg" else "using np.sum"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = getattr(grouped, operation)(sum)
    tm.assert_series_equal(result, expected)
1290
+
1291
+
1292
@pytest.mark.parametrize(
    "observed, index, data",
    [
        (
            True,
            MultiIndex.from_arrays(
                [
                    Index(["bar"] * 4 + ["foo"] * 4, dtype="category", name="A"),
                    Index(
                        ["one", "one", "three", "three", "one", "one", "two", "two"],
                        dtype="category",
                        name="B",
                    ),
                    Index(["min", "max"] * 4),
                ]
            ),
            [2, 2, 4, 4, 1, 1, 3, 3],
        ),
        (
            False,
            MultiIndex.from_product(
                [
                    CategoricalIndex(["bar", "foo"], ordered=False),
                    CategoricalIndex(["one", "three", "two"], ordered=False),
                    Index(["min", "max"]),
                ],
                names=["A", "B", None],
            ),
            [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
        ),
        (
            None,
            MultiIndex.from_product(
                [
                    CategoricalIndex(["bar", "foo"], ordered=False),
                    CategoricalIndex(["one", "three", "two"], ordered=False),
                    Index(["min", "max"]),
                ],
                names=["A", "B", None],
            ),
            [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
        ),
    ],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
    # GH 24880: apply returning a dict expands into one extra index level
    result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
        lambda x: {"min": x.min(), "max": x.max()}
    )
    tm.assert_series_equal(result, Series(data=data, index=index, name="C"))
1343
+
1344
+
1345
+ def test_groupby_categorical_series_dataframe_consistent(df_cat):
1346
+ # GH 20416
1347
+ expected = df_cat.groupby(["A", "B"], observed=False)["C"].mean()
1348
+ result = df_cat.groupby(["A", "B"], observed=False).mean()["C"]
1349
+ tm.assert_series_equal(result, expected)
1350
+
1351
+
1352
@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
    # GH 13420: axis=1 grouping matches grouping the transpose on axis=0
    df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
    cat = Categorical.from_codes(code, categories=list("abc"))
    msg = "DataFrame.groupby with axis=1 is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb = df.groupby(cat, axis=1, observed=False)
    result = gb.mean()
    msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        gb2 = df.T.groupby(cat, axis=0, observed=False)
    expected = gb2.mean().T
    tm.assert_frame_equal(result, expected)
1366
+
1367
+
1368
+ def test_groupby_cat_preserves_structure(observed, ordered):
1369
+ # GH 28787
1370
+ df = DataFrame(
1371
+ {"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
1372
+ columns=["Name", "Item"],
1373
+ )
1374
+ expected = df.copy()
1375
+
1376
+ result = (
1377
+ df.groupby("Name", observed=observed)
1378
+ .agg(DataFrame.sum, skipna=True)
1379
+ .reset_index()
1380
+ )
1381
+
1382
+ tm.assert_frame_equal(result, expected)
1383
+
1384
+
1385
def test_get_nonexistent_category():
    # Accessing a column that does not exist inside apply must raise KeyError
    df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})
    with pytest.raises(KeyError, match="'vau'"):
        df.groupby("var").apply(
            lambda rows: DataFrame(
                {"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
            )
        )
1394
+
1395
+
1396
def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed):
    # GH 17605: result length includes unobserved combinations iff
    # observed=False (4 observed pairs vs the 4x4 category product)
    if reduction_func == "ngroup":
        pytest.skip("ngroup is not truly a reduction")

    df = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABCD")),
            "cat_2": Categorical(list("AB") * 2, categories=list("ABCD")),
            "value": [0.1] * 4,
        }
    )
    args = get_groupby_method_args(reduction_func, df)

    series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]

    if reduction_func == "corrwith":
        # TODO: implemented SeriesGroupBy.corrwith. See GH 32293
        assert not hasattr(series_groupby, reduction_func)
        return

    agg = getattr(series_groupby, reduction_func)

    if not observed and reduction_func in ["idxmin", "idxmax"]:
        # idxmin and idxmax are designed to fail on empty inputs
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            agg(*args)
        return

    result = agg(*args)

    assert len(result) == (4 if observed else 16)
1432
+
1433
+
1434
def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
    reduction_func, request
):
    # GH 17605
    # Tests whether the unobserved categories in the result contain 0 or NaN
    if reduction_func == "ngroup":
        pytest.skip("ngroup is not truly a reduction")

    if reduction_func == "corrwith":  # GH 32293
        mark = pytest.mark.xfail(
            reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
        )
        request.applymarker(mark)

    df = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": Categorical(list("AB") * 2, categories=list("ABC")),
            "value": [0.1] * 4,
        }
    )
    unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
    args = get_groupby_method_args(reduction_func, df)

    series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
    agg = getattr(series_groupby, reduction_func)

    if reduction_func in ["idxmin", "idxmax"]:
        # idxmin and idxmax are designed to fail on empty inputs
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            agg(*args)
        return

    result = agg(*args)

    zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]

    # each unobserved combination carries the expected filler value
    for idx in unobserved:
        val = result.loc[idx]
        assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)

    # If we expect unobserved values to be zero, we also expect the dtype to be int.
    # Except for .sum(). If the observed categories sum to dtype=float (i.e. their
    # sums have decimals), then the zeros for the missing categories should also be
    # floats.
    if zero_or_nan == 0 and reduction_func != "sum":
        assert np.issubdtype(result.dtype, np.integer)
1484
+
1485
+
1486
def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
    """With observed=True and two categorical group keys, no unobserved
    category combination may appear in the result index (GH 23865, GH 27075)."""
    # GH 23865
    # GH 27075
    # Ensure that df.groupby, when 'by' is two Categorical variables,
    # does not return the categories that are not in df when observed=True
    if reduction_func == "ngroup":
        pytest.skip("ngroup does not return the Categories on the index")

    df = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": Categorical(list("1111"), categories=list("12")),
            "value": [0.1, 0.1, 0.1, 0.1],
        }
    )
    # only ("A","1") and ("B","1") occur in the data
    unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]

    df_grp = df.groupby(["cat_1", "cat_2"], observed=True)

    args = get_groupby_method_args(reduction_func, df)
    res = getattr(df_grp, reduction_func)(*args)

    for cat in unobserved_cats:
        assert cat not in res.index
1510
+
1511
+
1512
@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
    reduction_func, observed
):
    """With observed=False (or the None default), the result must include the
    unobserved category combinations, filled per reduction (GH 23865, GH 27075)."""
    # GH 23865
    # GH 27075
    # Ensure that df.groupby, when 'by' is two Categorical variables,
    # returns the categories that are not in df when observed=False/None

    if reduction_func == "ngroup":
        pytest.skip("ngroup does not return the Categories on the index")

    df = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": Categorical(list("1111"), categories=list("12")),
            "value": [0.1, 0.1, 0.1, 0.1],
        }
    )
    unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]

    df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)

    args = get_groupby_method_args(reduction_func, df)

    if not observed and reduction_func in ["idxmin", "idxmax"]:
        # idxmin and idxmax are designed to fail on empty inputs
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            getattr(df_grp, reduction_func)(*args)
        return

    res = getattr(df_grp, reduction_func)(*args)

    # expected fill value (0 or NaN) per reduction, from the module-level table
    expected = _results_for_groupbys_with_missing_categories[reduction_func]

    if expected is np.nan:
        assert res.loc[unobserved_cats].isnull().all().all()
    else:
        assert (res.loc[unobserved_cats] == expected).all().all()
1553
+
1554
+
1555
def test_series_groupby_categorical_aggregation_getitem():
    """GH 8870: selecting a column before vs. after aggregation must agree
    when one of the group keys is a binned (categorical) column."""
    frame = DataFrame(
        {
            "foo": [10, 8, 4, 1],
            "bar": [10, 20, 30, 40],
            "baz": ["d", "c", "d", "c"],
        }
    )
    # bin "foo" into 4 equal-width intervals over [0, 20]
    frame["range"] = pd.cut(frame["foo"], np.linspace(0, 20, 5))
    grouped = frame.groupby(
        ["range", "baz"], as_index=True, sort=True, observed=False
    )
    # column selected first ...
    result = grouped["foo"].agg("mean")
    # ... must equal aggregating everything and selecting afterwards
    expected = grouped.agg("mean")["foo"]
    tm.assert_series_equal(result, expected)
1565
+
1566
+
1567
@pytest.mark.parametrize(
    "func, expected_values",
    [(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])],
)
def test_groupby_agg_categorical_columns(func, expected_values):
    """GH#31256: aggregating a categorical value column via a Series method."""
    data = DataFrame(
        {
            "id": [0, 1, 2, 3, 4],
            "groups": [0, 1, 1, 2, 2],
            "value": Categorical([0, 0, 0, 0, 1]),
        }
    ).set_index("id")

    result = data.groupby("groups").agg(func)

    expected_index = Index([0, 1, 2], name="groups")
    expected = DataFrame({"value": expected_values}, index=expected_index)
    tm.assert_frame_equal(result, expected)
1586
+
1587
+
1588
def test_groupby_agg_non_numeric():
    """nunique over a categorical column grouped by an external key array:
    the explicit .agg(Series.nunique) path and .nunique() must agree."""
    frame = DataFrame(
        {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])}
    )
    expected = DataFrame({"A": [2, 1]}, index=np.array([1, 2]))

    grouper = [1, 2, 1]
    tm.assert_frame_equal(frame.groupby(grouper).agg(Series.nunique), expected)
    tm.assert_frame_equal(frame.groupby(grouper).nunique(), expected)
1597
+
1598
+
1599
@pytest.mark.parametrize("func", ["first", "last"])
def test_groupby_first_returned_categorical_instead_of_dataframe(func):
    """GH 28641: first/last on a SeriesGroupBy over an ordered categorical
    column must return a Series with the dtype intact (previously the index
    was dropped and a renamed Categorical came back instead)."""
    ordered_cat = Series(["b"], dtype="category").cat.as_ordered()
    df = DataFrame({"A": [1997], "B": ordered_cat})
    grouped = df.groupby("A")["B"]

    result = getattr(grouped, func)()

    # the ordered categorical dtype must survive the reduction
    expected = Series(
        ["b"], index=Index([1997], name="A"), name="B", dtype=df["B"].dtype
    )
    tm.assert_series_equal(result, expected)
1612
+
1613
+
1614
def test_read_only_category_no_sort():
    """GH#33410: grouping on a categorical whose categories are backed by a
    read-only numpy array must not raise."""
    cats = np.array([1, 2])
    cats.flags.writeable = False  # simulate a read-only buffer
    df = DataFrame(
        {
            "a": [1, 3, 5, 7],
            "b": Categorical([1, 1, 2, 2], categories=Index(cats)),
        }
    )

    result = df.groupby("b", sort=False, observed=False).mean()

    expected = DataFrame(
        {"a": [2.0, 6.0]}, index=CategoricalIndex(cats, name="b")
    )
    tm.assert_frame_equal(result, expected)
1624
+
1625
+
1626
def test_sorted_missing_category_values():
    """GH 28597: size().unstack() on an ordered categorical must keep the
    category order in the resulting columns and include unused categories."""
    # GH 28597
    df = DataFrame(
        {
            "foo": [
                "small",
                "large",
                "large",
                "large",
                "medium",
                "large",
                "large",
                "medium",
            ],
            "bar": ["C", "A", "A", "C", "A", "C", "A", "C"],
        }
    )
    # "tiny" is declared as a category but never observed in the data
    df["foo"] = (
        df["foo"]
        .astype("category")
        .cat.set_categories(["tiny", "small", "medium", "large"], ordered=True)
    )

    expected = DataFrame(
        {
            "tiny": {"A": 0, "C": 0},
            "small": {"A": 0, "C": 1},
            "medium": {"A": 1, "C": 1},
            "large": {"A": 3, "C": 2},
        }
    )
    expected = expected.rename_axis("bar", axis="index")
    # columns must be a CategoricalIndex preserving the declared order
    expected.columns = CategoricalIndex(
        ["tiny", "small", "medium", "large"],
        categories=["tiny", "small", "medium", "large"],
        ordered=True,
        name="foo",
        dtype="category",
    )

    result = df.groupby(["bar", "foo"], observed=False).size().unstack()

    tm.assert_frame_equal(result, expected)
1669
+
1670
+
1671
def test_agg_cython_category_not_implemented_fallback():
    """GH#31450: first() on an (unordered) categorical column falls back
    cleanly and keeps the categorical dtype."""
    df = DataFrame({"col_num": [1, 1, 2, 3]})
    df["col_cat"] = df["col_num"].astype("category")

    # ordered categorical dtype should definitely be preserved;
    # this is unordered, so is less-clear case (if anything, it should raise)
    expected = Series(
        [1, 2, 3],
        index=Index([1, 2, 3], name="col_num"),
        name="col_cat",
        dtype=df["col_cat"].dtype,
    )

    series_result = df.groupby("col_num").col_cat.first()
    tm.assert_series_equal(series_result, expected)

    frame_result = df.groupby("col_num").agg({"col_cat": "first"})
    tm.assert_frame_equal(frame_result, expected.to_frame())
1691
+
1692
+
1693
def test_aggregate_categorical_with_isnan():
    """GH 29837: a lambda aggregation (NA counting) must also be applied to
    categorical columns, not silently dropped."""
    # GH 29837
    df = DataFrame(
        {
            "A": [1, 1, 1, 1],
            "B": [1, 2, 1, 2],
            "numerical_col": [0.1, 0.2, np.nan, 0.3],
            "object_col": ["foo", "bar", "foo", "fee"],
            "categorical_col": ["foo", "bar", "foo", "fee"],
        }
    )

    df = df.astype({"categorical_col": "category"})

    # count NAs per group in every column, including the categorical one
    result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
    index = MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
    expected = DataFrame(
        data={
            "numerical_col": [1, 0],
            "object_col": [0, 0],
            "categorical_col": [0, 0],
        },
        index=index,
    )
    tm.assert_frame_equal(result, expected)
1718
+
1719
+
1720
def test_categorical_transform():
    """GH 29037: transform(max) over an ordered categorical column must
    preserve the ordered categorical dtype in the broadcast result."""
    # GH 29037
    df = DataFrame(
        {
            "package_id": [1, 1, 1, 2, 2, 3],
            "status": [
                "Waiting",
                "OnTheWay",
                "Delivered",
                "Waiting",
                "OnTheWay",
                "Waiting",
            ],
        }
    )

    delivery_status_type = pd.CategoricalDtype(
        categories=["Waiting", "OnTheWay", "Delivered"], ordered=True
    )
    df["status"] = df["status"].astype(delivery_status_type)
    # passing the builtin max is deprecated in favor of the string alias
    msg = "using SeriesGroupBy.max"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        # GH#53425
        df["last_status"] = df.groupby("package_id")["status"].transform(max)
    result = df.copy()

    expected = DataFrame(
        {
            "package_id": [1, 1, 1, 2, 2, 3],
            "status": [
                "Waiting",
                "OnTheWay",
                "Delivered",
                "Waiting",
                "OnTheWay",
                "Waiting",
            ],
            "last_status": [
                "Delivered",
                "Delivered",
                "Delivered",
                "OnTheWay",
                "OnTheWay",
                "Waiting",
            ],
        }
    )

    expected["status"] = expected["status"].astype(delivery_status_type)

    # .transform(max) should preserve ordered categoricals
    expected["last_status"] = expected["last_status"].astype(delivery_status_type)

    tm.assert_frame_equal(result, expected)
1774
+
1775
+
1776
+ @pytest.mark.parametrize("func", ["first", "last"])
1777
+ def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
1778
+ func: str, observed: bool
1779
+ ):
1780
+ # GH 34951
1781
+ cat = Categorical([0, 0, 1, 1])
1782
+ val = [0, 1, 1, 0]
1783
+ df = DataFrame({"a": cat, "b": cat, "c": val})
1784
+
1785
+ cat2 = Categorical([0, 1])
1786
+ idx = MultiIndex.from_product([cat2, cat2], names=["a", "b"])
1787
+ expected_dict = {
1788
+ "first": Series([0, np.nan, np.nan, 1], idx, name="c"),
1789
+ "last": Series([1, np.nan, np.nan, 0], idx, name="c"),
1790
+ }
1791
+
1792
+ expected = expected_dict[func]
1793
+ if observed:
1794
+ expected = expected.dropna().astype(np.int64)
1795
+
1796
+ srs_grp = df.groupby(["a", "b"], observed=observed)["c"]
1797
+ result = getattr(srs_grp, func)()
1798
+ tm.assert_series_equal(result, expected)
1799
+
1800
+
1801
@pytest.mark.parametrize("func", ["first", "last"])
def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals(
    func: str, observed: bool
):
    """GH 34951: DataFrame counterpart of the SeriesGroupBy test above —
    first/last grouped on two categorical keys, with and without observed."""
    # GH 34951
    cat = Categorical([0, 0, 1, 1])
    val = [0, 1, 1, 0]
    df = DataFrame({"a": cat, "b": cat, "c": val})

    # full cartesian product of the categories forms the expected index
    cat2 = Categorical([0, 1])
    idx = MultiIndex.from_product([cat2, cat2], names=["a", "b"])
    expected_dict = {
        "first": Series([0, np.nan, np.nan, 1], idx, name="c"),
        "last": Series([1, np.nan, np.nan, 0], idx, name="c"),
    }

    expected = expected_dict[func].to_frame()
    if observed:
        # unobserved combinations drop and the column stays int64
        expected = expected.dropna().astype(np.int64)

    df_grp = df.groupby(["a", "b"], observed=observed)
    result = getattr(df_grp, func)()
    tm.assert_frame_equal(result, expected)
1824
+
1825
+
1826
def test_groupby_categorical_indices_unused_categories():
    """GH#38642: .indices must contain every category, with an empty
    position array for categories absent from the data."""
    df = DataFrame(
        {
            "key": Categorical(["b", "b", "a"], categories=["a", "b", "c"]),
            "col": range(3),
        }
    )
    result = df.groupby("key", sort=False, observed=False).indices

    expected = {
        "b": np.array([0, 1], dtype="intp"),
        "a": np.array([2], dtype="intp"),
        "c": np.array([], dtype="intp"),  # unused category -> empty positions
    }
    assert result.keys() == expected.keys()
    for key, value in expected.items():
        tm.assert_numpy_array_equal(result[key], value)
1844
+
1845
+
1846
@pytest.mark.parametrize("func", ["first", "last"])
def test_groupby_last_first_preserve_categoricaldtype(func):
    """GH#33090: first/last must keep the categorical dtype of the values."""
    df = DataFrame({"a": [1, 2, 3]})
    df["b"] = df["a"].astype("category")

    grouped = df.groupby("a")["b"]
    result = getattr(grouped, func)()

    expected = Series(
        Categorical([1, 2, 3]), name="b", index=Index([1, 2, 3], name="a")
    )
    tm.assert_series_equal(expected, result)
1856
+
1857
+
1858
def test_groupby_categorical_observed_nunique():
    """GH#45128: nunique with observed=True over two categorical keys."""
    df = DataFrame({"a": [1, 2], "b": [1, 2], "c": [10, 11]}).astype(
        dtype={"a": "category", "b": "category"}
    )

    result = df.groupby(["a", "b"], observed=True).nunique()["c"]

    levels = [
        CategoricalIndex([1, 2], name="a"),
        CategoricalIndex([1, 2], name="b"),
    ]
    expected = Series([1, 1], index=MultiIndex.from_arrays(levels), name="c")
    tm.assert_series_equal(result, expected)
1871
+
1872
+
1873
def test_groupby_categorical_aggregate_functions():
    """GH#37275: max on an ordered categorical value column must return
    the ordered categorical dtype, not fall back to object."""
    dtype = pd.CategoricalDtype(categories=["small", "big"], ordered=True)
    rows = [[1, "small"], [1, "big"], [2, "small"]]
    df = DataFrame(rows, columns=["grp", "description"]).astype(
        {"description": dtype}
    )

    result = df.groupby("grp")["description"].max()

    expected = Series(
        ["big", "small"],
        index=Index([1, 2], name="grp"),
        name="description",
        dtype=pd.CategoricalDtype(categories=["small", "big"], ordered=True),
    )
    tm.assert_series_equal(result, expected)
1889
+
1890
+
1891
+ def test_groupby_categorical_dropna(observed, dropna):
1892
+ # GH#48645 - dropna should have no impact on the result when there are no NA values
1893
+ cat = Categorical([1, 2], categories=[1, 2, 3])
1894
+ df = DataFrame({"x": Categorical([1, 2], categories=[1, 2, 3]), "y": [3, 4]})
1895
+ gb = df.groupby("x", observed=observed, dropna=dropna)
1896
+ result = gb.sum()
1897
+
1898
+ if observed:
1899
+ expected = DataFrame({"y": [3, 4]}, index=cat)
1900
+ else:
1901
+ index = CategoricalIndex([1, 2, 3], [1, 2, 3])
1902
+ expected = DataFrame({"y": [3, 4, 0]}, index=index)
1903
+ expected.index.name = "x"
1904
+
1905
+ tm.assert_frame_equal(result, expected)
1906
+
1907
+
1908
@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
@pytest.mark.parametrize("ordered", [True, False])
def test_category_order_reducer(
    request, as_index, sort, observed, reduction_func, index_kind, ordered
):
    """GH#48749: any reducer must preserve the declared category order
    ([1, 4, 3, 2]) of the group key in the result index/column."""
    # GH#48749
    if reduction_func == "corrwith" and not as_index:
        msg = "GH#49950 - corrwith with as_index=False may not have grouping column"
        request.applymarker(pytest.mark.xfail(reason=msg))
    elif index_kind != "range" and not as_index:
        pytest.skip(reason="Result doesn't have categories, nothing to test")
    df = DataFrame(
        {
            "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
            "b": range(4),
        }
    )
    # key may live in a column, a single index level, or a MultiIndex
    if index_kind == "range":
        keys = ["a"]
    elif index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    args = get_groupby_method_args(reduction_func, df)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)

    if not observed and reduction_func in ["idxmin", "idxmax"]:
        # idxmin and idxmax are designed to fail on empty inputs
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            getattr(gb, reduction_func)(*args)
        return

    op_result = getattr(gb, reduction_func)(*args)
    if as_index:
        result = op_result.index.get_level_values("a").categories
    else:
        result = op_result["a"].cat.categories
    # categories must come back in the declared (non-sorted) order
    expected = Index([1, 4, 3, 2])
    tm.assert_index_equal(result, expected)

    if index_kind == "multi":
        result = op_result.index.get_level_values("a2").categories
        tm.assert_index_equal(result, expected)
1957
+
1958
@pytest.mark.parametrize("index_kind", ["single", "multi"])
@pytest.mark.parametrize("ordered", [True, False])
def test_category_order_transformer(
    as_index, sort, observed, transformation_func, index_kind, ordered
):
    """GH#48749: transformers must preserve the declared category order of
    the (index-based) group key in the result index."""
    # GH#48749
    df = DataFrame(
        {
            "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
            "b": range(4),
        }
    )
    if index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    args = get_groupby_method_args(transformation_func, df)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
    # groupby.fillna is deprecated; tolerate its warning for that one func
    warn = FutureWarning if transformation_func == "fillna" else None
    msg = "DataFrameGroupBy.fillna is deprecated"
    with tm.assert_produces_warning(warn, match=msg):
        op_result = getattr(gb, transformation_func)(*args)
    result = op_result.index.get_level_values("a").categories
    expected = Index([1, 4, 3, 2])
    tm.assert_index_equal(result, expected)

    if index_kind == "multi":
        result = op_result.index.get_level_values("a2").categories
        tm.assert_index_equal(result, expected)
1990
+
1991
+
1992
@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
@pytest.mark.parametrize("method", ["head", "tail"])
@pytest.mark.parametrize("ordered", [True, False])
def test_category_order_head_tail(
    as_index, sort, observed, method, index_kind, ordered
):
    """GH#48749: head/tail must preserve the declared category order of the
    group key, whether it lives in a column or in the index."""
    # GH#48749
    df = DataFrame(
        {
            "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
            "b": range(4),
        }
    )
    if index_kind == "range":
        keys = ["a"]
    elif index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
    op_result = getattr(gb, method)()
    # head/tail keep the key as a column for range-indexed input
    if index_kind == "range":
        result = op_result["a"].cat.categories
    else:
        result = op_result.index.get_level_values("a").categories
    expected = Index([1, 4, 3, 2])
    tm.assert_index_equal(result, expected)

    if index_kind == "multi":
        result = op_result.index.get_level_values("a2").categories
        tm.assert_index_equal(result, expected)
2026
+
2027
+
2028
@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("ordered", [True, False])
def test_category_order_apply(as_index, sort, observed, method, index_kind, ordered):
    """GH#48749: apply/agg/transform with a UDF must preserve the declared
    category order of the group key."""
    # GH#48749
    if (method == "transform" and index_kind == "range") or (
        not as_index and index_kind != "range"
    ):
        pytest.skip("No categories in result, nothing to test")
    df = DataFrame(
        {
            "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered),
            "b": range(4),
        }
    )
    if index_kind == "range":
        keys = ["a"]
    elif index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed)
    # apply over grouping columns warns (deprecation) only for range index
    warn = DeprecationWarning if method == "apply" and index_kind == "range" else None
    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(warn, match=msg):
        op_result = getattr(gb, method)(lambda x: x.sum(numeric_only=True))
    if (method == "transform" or not as_index) and index_kind == "range":
        result = op_result["a"].cat.categories
    else:
        result = op_result.index.get_level_values("a").categories
    expected = Index([1, 4, 3, 2])
    tm.assert_index_equal(result, expected)

    if index_kind == "multi":
        result = op_result.index.get_level_values("a2").categories
        tm.assert_index_equal(result, expected)
2067
+
2068
+
2069
@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
def test_many_categories(as_index, sort, index_kind, ordered):
    """GH#48749: order preservation with a very large (10k) category set,
    declared in descending order."""
    # GH#48749 - Test when the grouper has many categories
    if index_kind != "range" and not as_index:
        pytest.skip(reason="Result doesn't have categories, nothing to test")
    # categories declared 9999..0, i.e. reverse of the natural sort order
    categories = np.arange(9999, -1, -1)
    grouper = Categorical([2, 1, 2, 3], categories=categories, ordered=ordered)
    df = DataFrame({"a": grouper, "b": range(4)})
    if index_kind == "range":
        keys = ["a"]
    elif index_kind == "single":
        keys = ["a"]
        df = df.set_index(keys)
    elif index_kind == "multi":
        keys = ["a", "a2"]
        df["a2"] = df["a"]
        df = df.set_index(keys)
    gb = df.groupby(keys, as_index=as_index, sort=sort, observed=True)
    result = gb.sum()

    # Test is setup so that data and index are the same values
    # sort=True follows category order (descending); sort=False follows
    # first-appearance order
    data = [3, 2, 1] if sort else [2, 1, 3]

    index = CategoricalIndex(
        data, categories=grouper.categories, ordered=ordered, name="a"
    )
    if as_index:
        expected = DataFrame({"b": data})
        if index_kind == "multi":
            expected.index = MultiIndex.from_frame(DataFrame({"a": index, "a2": index}))
        else:
            expected.index = index
    elif index_kind == "multi":
        expected = DataFrame({"a": Series(index), "a2": Series(index), "b": data})
    else:
        expected = DataFrame({"a": Series(index), "b": data})

    tm.assert_frame_equal(result, expected)
2107
+
2108
+
2109
@pytest.mark.parametrize("cat_columns", ["a", "b", ["a", "b"]])
@pytest.mark.parametrize("keys", ["a", "b", ["a", "b"]])
def test_groupby_default_depr(cat_columns, keys):
    """GH#43999: grouping by a categorical column without passing observed
    must emit the observed=False deprecation warning — and only then."""
    # GH#43999
    df = DataFrame({"a": [1, 1, 2, 3], "b": [4, 5, 6, 7]})
    df[cat_columns] = df[cat_columns].astype("category")
    msg = "The default of observed=False is deprecated"
    # warn only when at least one group key is actually categorical
    klass = FutureWarning if set(cat_columns) & set(keys) else None
    with tm.assert_produces_warning(klass, match=msg):
        df.groupby(keys)
2119
+
2120
+
2121
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
def test_agg_list(request, as_index, observed, reduction_func, test_series, keys):
    """GH#52760: .agg([func]) on categorical keys must match calling the
    reduction directly, for both SeriesGroupBy and DataFrameGroupBy."""
    # GH#52760
    if test_series and reduction_func == "corrwith":
        assert not hasattr(SeriesGroupBy, "corrwith")
        pytest.skip("corrwith not implemented for SeriesGroupBy")
    elif reduction_func == "corrwith":
        msg = "GH#32293: attempts to call SeriesGroupBy.corrwith"
        request.applymarker(pytest.mark.xfail(reason=msg))
    elif (
        reduction_func == "nunique"
        and not test_series
        and len(keys) != 1
        and not observed
        and not as_index
    ):
        msg = "GH#52848 - raises a ValueError"
        request.applymarker(pytest.mark.xfail(reason=msg))

    df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]})
    df = df.astype({"a1": "category", "a2": "category"})
    if "a2" not in keys:
        df = df.drop(columns="a2")
    gb = df.groupby(by=keys, as_index=as_index, observed=observed)
    if test_series:
        gb = gb["b"]
    args = get_groupby_method_args(reduction_func, df)

    if not observed and reduction_func in ["idxmin", "idxmax"] and keys == ["a1", "a2"]:
        # idxmin/idxmax raise on the empty unobserved groups
        with pytest.raises(
            ValueError, match="empty group due to unobserved categories"
        ):
            gb.agg([reduction_func], *args)
        return

    result = gb.agg([reduction_func], *args)
    expected = getattr(gb, reduction_func)(*args)

    # .agg([...]) wraps the output: a frame with (possibly MultiIndex) columns
    if as_index and (test_series or reduction_func == "size"):
        expected = expected.to_frame(reduction_func)
    if not test_series:
        expected.columns = MultiIndex.from_tuples(
            [(ind, "") for ind in expected.columns[:-1]] + [("b", reduction_func)]
        )
    elif not as_index:
        expected.columns = keys + [reduction_func]

    tm.assert_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_counting.py ADDED
@@ -0,0 +1,394 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import product
2
+ from string import ascii_lowercase
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from pandas import (
8
+ DataFrame,
9
+ Index,
10
+ MultiIndex,
11
+ Period,
12
+ Series,
13
+ Timedelta,
14
+ Timestamp,
15
+ date_range,
16
+ )
17
+ import pandas._testing as tm
18
+
19
+
20
+ class TestCounting:
21
    def test_cumcount(self):
        # cumcount numbers rows 0..k-1 within each group, for both the
        # DataFrameGroupBy and the derived SeriesGroupBy
        df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"])
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 1, 2, 0, 3])

        tm.assert_series_equal(expected, g.cumcount())
        tm.assert_series_equal(expected, sg.cumcount())
30
+
31
    def test_cumcount_empty(self):
        # cumcount on empty groupbys returns an empty int64 Series
        ge = DataFrame().groupby(level=0)
        se = Series(dtype=object).groupby(level=0)

        # edge case, as this is usually considered float
        e = Series(dtype="int64")

        tm.assert_series_equal(e, ge.cumcount())
        tm.assert_series_equal(e, se.cumcount())
40
+
41
    def test_cumcount_dupe_index(self):
        # a fully duplicated index must be preserved, not deduplicated
        df = DataFrame(
            [["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
        )
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)

        tm.assert_series_equal(expected, g.cumcount())
        tm.assert_series_equal(expected, sg.cumcount())
52
+
53
    def test_cumcount_mi(self):
        # cumcount keeps a MultiIndex intact on the result
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=mi)
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 1, 2, 0, 3], index=mi)

        tm.assert_series_equal(expected, g.cumcount())
        tm.assert_series_equal(expected, sg.cumcount())
63
+
64
    def test_cumcount_groupby_not_col(self):
        # grouping by an external array (not a column) behaves the same
        df = DataFrame(
            [["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
        )
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A

        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)

        tm.assert_series_equal(expected, g.cumcount())
        tm.assert_series_equal(expected, sg.cumcount())
75
+
76
    def test_ngroup(self):
        # ngroup labels each row with its group number (0-based, in group order)
        df = DataFrame({"A": list("aaaba")})
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 0, 0, 1, 0])

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())
85
+
86
    def test_ngroup_distinct(self):
        # all-distinct keys -> group numbers 0..n-1
        df = DataFrame({"A": list("abcde")})
        g = df.groupby("A")
        sg = g.A

        expected = Series(range(5), dtype="int64")

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())
95
+
96
    def test_ngroup_one_group(self):
        # a single group numbers every row 0
        df = DataFrame({"A": [0] * 5})
        g = df.groupby("A")
        sg = g.A

        expected = Series([0] * 5)

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())
105
+
106
    def test_ngroup_empty(self):
        # ngroup on empty groupbys returns an empty int64 Series
        ge = DataFrame().groupby(level=0)
        se = Series(dtype=object).groupby(level=0)

        # edge case, as this is usually considered float
        e = Series(dtype="int64")

        tm.assert_series_equal(e, ge.ngroup())
        tm.assert_series_equal(e, se.ngroup())
115
+
116
    def test_ngroup_series_matches_frame(self):
        # ngroup must agree between frame and series grouped by the same key
        df = DataFrame({"A": list("aaaba")})
        s = Series(list("aaaba"))

        tm.assert_series_equal(df.groupby(s).ngroup(), s.groupby(s).ngroup())
121
+
122
    def test_ngroup_dupe_index(self):
        # a fully duplicated index must be preserved on the result
        df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
        g = df.groupby("A")
        sg = g.A

        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())
131
+
132
    def test_ngroup_mi(self):
        # ngroup keeps a MultiIndex intact on the result
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame({"A": list("aaaba")}, index=mi)
        g = df.groupby("A")
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=mi)

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())
141
+
142
    def test_ngroup_groupby_not_col(self):
        # grouping by an external array (not a column) behaves the same
        df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A

        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)

        tm.assert_series_equal(expected, g.ngroup())
        tm.assert_series_equal(expected, sg.ngroup())
151
+
152
    def test_ngroup_descending(self):
        # ascending=False reverses the numbering: (ngroups - 1) - ascending
        df = DataFrame(["a", "a", "b", "a", "b"], columns=["A"])
        g = df.groupby(["A"])

        ascending = Series([0, 0, 1, 0, 1])
        descending = Series([1, 1, 0, 1, 0])

        tm.assert_series_equal(descending, (g.ngroups - 1) - ascending)
        tm.assert_series_equal(ascending, g.ngroup(ascending=True))
        tm.assert_series_equal(descending, g.ngroup(ascending=False))
162
+
163
    def test_ngroup_matches_cumcount(self):
        # verify one manually-worked out case works
        df = DataFrame(
            [["a", "x"], ["a", "y"], ["b", "x"], ["a", "x"], ["b", "y"]],
            columns=["A", "X"],
        )
        g = df.groupby(["A", "X"])
        g_ngroup = g.ngroup()
        g_cumcount = g.cumcount()
        expected_ngroup = Series([0, 1, 2, 0, 3])
        expected_cumcount = Series([0, 0, 0, 1, 0])

        tm.assert_series_equal(g_ngroup, expected_ngroup)
        tm.assert_series_equal(g_cumcount, expected_cumcount)
177
+
178
    def test_ngroup_cumcount_pair(self):
        # brute force comparison for all small series
        # (every length-4 sequence over {0, 1, 2})
        for p in product(range(3), repeat=4):
            df = DataFrame({"a": p})
            g = df.groupby(["a"])

            # reference implementations computed directly from the tuple
            order = sorted(set(p))
            ngroupd = [order.index(val) for val in p]
            cumcounted = [p[:i].count(val) for i, val in enumerate(p)]

            tm.assert_series_equal(g.ngroup(), Series(ngroupd))
            tm.assert_series_equal(g.cumcount(), Series(cumcounted))
190
+
191
    def test_ngroup_respects_groupby_order(self, sort):
        # ngroup/cumcount must match the order produced by iterating the groupby
        df = DataFrame({"a": np.random.default_rng(2).choice(list("abcdef"), 100)})
        g = df.groupby("a", sort=sort)
        df["group_id"] = -1
        df["group_index"] = -1

        # derive expected labels by walking the groups in iteration order
        for i, (_, group) in enumerate(g):
            df.loc[group.index, "group_id"] = i
            for j, ind in enumerate(group.index):
                df.loc[ind, "group_index"] = j

        tm.assert_series_equal(Series(df["group_id"].values), g.ngroup())
        tm.assert_series_equal(Series(df["group_index"].values), g.cumcount())
204
+
205
    @pytest.mark.parametrize(
        "datetimelike",
        [
            [Timestamp(f"2016-05-{i:02d} 20:09:25+00:00") for i in range(1, 4)],
            [Timestamp(f"2016-05-{i:02d} 20:09:25") for i in range(1, 4)],
            [Timestamp(f"2016-05-{i:02d} 20:09:25", tz="UTC") for i in range(1, 4)],
            [Timedelta(x, unit="h") for x in range(1, 4)],
            [Period(freq="2W", year=2017, month=x) for x in range(1, 4)],
        ],
    )
    def test_count_with_datetimelike(self, datetimelike):
        # test for #13393, where DataframeGroupBy.count() fails
        # when counting a datetimelike column.

        df = DataFrame({"x": ["a", "a", "b"], "y": datetimelike})
        res = df.groupby("x").count()
        expected = DataFrame({"y": [2, 1]}, index=["a", "b"])
        expected.index.name = "x"
        tm.assert_frame_equal(expected, res)
224
+
225
+ def test_count_with_only_nans_in_first_group(self):
226
+ # GH21956
227
+ df = DataFrame({"A": [np.nan, np.nan], "B": ["a", "b"], "C": [1, 2]})
228
+ result = df.groupby(["A", "B"]).C.count()
229
+ mi = MultiIndex(levels=[[], ["a", "b"]], codes=[[], []], names=["A", "B"])
230
+ expected = Series([], index=mi, dtype=np.int64, name="C")
231
+ tm.assert_series_equal(result, expected, check_index_type=False)
232
+
233
+ def test_count_groupby_column_with_nan_in_groupby_column(self):
234
+ # https://github.com/pandas-dev/pandas/issues/32841
235
+ df = DataFrame({"A": [1, 1, 1, 1, 1], "B": [5, 4, np.nan, 3, 0]})
236
+ res = df.groupby(["B"]).count()
237
+ expected = DataFrame(
238
+ index=Index([0.0, 3.0, 4.0, 5.0], name="B"), data={"A": [1, 1, 1, 1]}
239
+ )
240
+ tm.assert_frame_equal(expected, res)
241
+
242
+ def test_groupby_count_dateparseerror(self):
243
+ dr = date_range(start="1/1/2012", freq="5min", periods=10)
244
+
245
+ # BAD Example, datetimes first
246
+ ser = Series(np.arange(10), index=[dr, np.arange(10)])
247
+ grouped = ser.groupby(lambda x: x[1] % 2 == 0)
248
+ result = grouped.count()
249
+
250
+ ser = Series(np.arange(10), index=[np.arange(10), dr])
251
+ grouped = ser.groupby(lambda x: x[0] % 2 == 0)
252
+ expected = grouped.count()
253
+
254
+ tm.assert_series_equal(result, expected)
255
+
256
+
257
+ def test_groupby_timedelta_cython_count():
258
+ df = DataFrame(
259
+ {"g": list("ab" * 2), "delta": np.arange(4).astype("timedelta64[ns]")}
260
+ )
261
+ expected = Series([2, 2], index=Index(["a", "b"], name="g"), name="delta")
262
+ result = df.groupby("g").delta.count()
263
+ tm.assert_series_equal(expected, result)
264
+
265
+
266
+ def test_count():
267
+ n = 1 << 15
268
+ dr = date_range("2015-08-30", periods=n // 10, freq="min")
269
+
270
+ df = DataFrame(
271
+ {
272
+ "1st": np.random.default_rng(2).choice(list(ascii_lowercase), n),
273
+ "2nd": np.random.default_rng(2).integers(0, 5, n),
274
+ "3rd": np.random.default_rng(2).standard_normal(n).round(3),
275
+ "4th": np.random.default_rng(2).integers(-10, 10, n),
276
+ "5th": np.random.default_rng(2).choice(dr, n),
277
+ "6th": np.random.default_rng(2).standard_normal(n).round(3),
278
+ "7th": np.random.default_rng(2).standard_normal(n).round(3),
279
+ "8th": np.random.default_rng(2).choice(dr, n)
280
+ - np.random.default_rng(2).choice(dr, 1),
281
+ "9th": np.random.default_rng(2).choice(list(ascii_lowercase), n),
282
+ }
283
+ )
284
+
285
+ for col in df.columns.drop(["1st", "2nd", "4th"]):
286
+ df.loc[np.random.default_rng(2).choice(n, n // 10), col] = np.nan
287
+
288
+ df["9th"] = df["9th"].astype("category")
289
+
290
+ for key in ["1st", "2nd", ["1st", "2nd"]]:
291
+ left = df.groupby(key).count()
292
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
293
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
294
+ right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
295
+ tm.assert_frame_equal(left, right)
296
+
297
+
298
+ def test_count_non_nulls():
299
+ # GH#5610
300
+ # count counts non-nulls
301
+ df = DataFrame(
302
+ [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, np.nan]],
303
+ columns=["A", "B", "C"],
304
+ )
305
+
306
+ count_as = df.groupby("A").count()
307
+ count_not_as = df.groupby("A", as_index=False).count()
308
+
309
+ expected = DataFrame([[1, 2], [0, 0]], columns=["B", "C"], index=[1, 3])
310
+ expected.index.name = "A"
311
+ tm.assert_frame_equal(count_not_as, expected.reset_index())
312
+ tm.assert_frame_equal(count_as, expected)
313
+
314
+ count_B = df.groupby("A")["B"].count()
315
+ tm.assert_series_equal(count_B, expected["B"])
316
+
317
+
318
+ def test_count_object():
319
+ df = DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3})
320
+ result = df.groupby("c").a.count()
321
+ expected = Series([3, 3], index=Index([2, 3], name="c"), name="a")
322
+ tm.assert_series_equal(result, expected)
323
+
324
+ df = DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3})
325
+ result = df.groupby("c").a.count()
326
+ expected = Series([1, 3], index=Index([2, 3], name="c"), name="a")
327
+ tm.assert_series_equal(result, expected)
328
+
329
+
330
+ def test_count_cross_type():
331
+ # GH8169
332
+ # Set float64 dtype to avoid upcast when setting nan below
333
+ vals = np.hstack(
334
+ (
335
+ np.random.default_rng(2).integers(0, 5, (100, 2)),
336
+ np.random.default_rng(2).integers(0, 2, (100, 2)),
337
+ )
338
+ ).astype("float64")
339
+
340
+ df = DataFrame(vals, columns=["a", "b", "c", "d"])
341
+ df[df == 2] = np.nan
342
+ expected = df.groupby(["c", "d"]).count()
343
+
344
+ for t in ["float32", "object"]:
345
+ df["a"] = df["a"].astype(t)
346
+ df["b"] = df["b"].astype(t)
347
+ result = df.groupby(["c", "d"]).count()
348
+ tm.assert_frame_equal(result, expected)
349
+
350
+
351
+ def test_lower_int_prec_count():
352
+ df = DataFrame(
353
+ {
354
+ "a": np.array([0, 1, 2, 100], np.int8),
355
+ "b": np.array([1, 2, 3, 6], np.uint32),
356
+ "c": np.array([4, 5, 6, 8], np.int16),
357
+ "grp": list("ab" * 2),
358
+ }
359
+ )
360
+ result = df.groupby("grp").count()
361
+ expected = DataFrame(
362
+ {"a": [2, 2], "b": [2, 2], "c": [2, 2]}, index=Index(list("ab"), name="grp")
363
+ )
364
+ tm.assert_frame_equal(result, expected)
365
+
366
+
367
+ def test_count_uses_size_on_exception():
368
+ class RaisingObjectException(Exception):
369
+ pass
370
+
371
+ class RaisingObject:
372
+ def __init__(self, msg="I will raise inside Cython") -> None:
373
+ super().__init__()
374
+ self.msg = msg
375
+
376
+ def __eq__(self, other):
377
+ # gets called in Cython to check that raising calls the method
378
+ raise RaisingObjectException(self.msg)
379
+
380
+ df = DataFrame({"a": [RaisingObject() for _ in range(4)], "grp": list("ab" * 2)})
381
+ result = df.groupby("grp").count()
382
+ expected = DataFrame({"a": [2, 2]}, index=Index(list("ab"), name="grp"))
383
+ tm.assert_frame_equal(result, expected)
384
+
385
+
386
+ def test_count_arrow_string_array(any_string_dtype):
387
+ # GH#54751
388
+ pytest.importorskip("pyarrow")
389
+ df = DataFrame(
390
+ {"a": [1, 2, 3], "b": Series(["a", "b", "a"], dtype=any_string_dtype)}
391
+ )
392
+ result = df.groupby("a").count()
393
+ expected = DataFrame({"b": 1}, index=Index([1, 2, 3], name="a"))
394
+ tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_cumulative.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.errors import UnsupportedFunctionCall
5
+ import pandas.util._test_decorators as td
6
+
7
+ import pandas as pd
8
+ from pandas import (
9
+ DataFrame,
10
+ Series,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+
15
+ @pytest.fixture(
16
+ params=[np.int32, np.int64, np.float32, np.float64, "Int64", "Float64"],
17
+ ids=["np.int32", "np.int64", "np.float32", "np.float64", "Int64", "Float64"],
18
+ )
19
+ def dtypes_for_minmax(request):
20
+ """
21
+ Fixture of dtypes with min and max values used for testing
22
+ cummin and cummax
23
+ """
24
+ dtype = request.param
25
+
26
+ np_type = dtype
27
+ if dtype == "Int64":
28
+ np_type = np.int64
29
+ elif dtype == "Float64":
30
+ np_type = np.float64
31
+
32
+ min_val = (
33
+ np.iinfo(np_type).min
34
+ if np.dtype(np_type).kind == "i"
35
+ else np.finfo(np_type).min
36
+ )
37
+ max_val = (
38
+ np.iinfo(np_type).max
39
+ if np.dtype(np_type).kind == "i"
40
+ else np.finfo(np_type).max
41
+ )
42
+
43
+ return (dtype, min_val, max_val)
44
+
45
+
46
+ def test_groupby_cumprod():
47
+ # GH 4095
48
+ df = DataFrame({"key": ["b"] * 10, "value": 2})
49
+
50
+ actual = df.groupby("key")["value"].cumprod()
51
+ expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod())
52
+ expected.name = "value"
53
+ tm.assert_series_equal(actual, expected)
54
+
55
+ df = DataFrame({"key": ["b"] * 100, "value": 2})
56
+ df["value"] = df["value"].astype(float)
57
+ actual = df.groupby("key")["value"].cumprod()
58
+ expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod())
59
+ expected.name = "value"
60
+ tm.assert_series_equal(actual, expected)
61
+
62
+
63
+ @pytest.mark.skip_ubsan
64
+ def test_groupby_cumprod_overflow():
65
+ # GH#37493 if we overflow we return garbage consistent with numpy
66
+ df = DataFrame({"key": ["b"] * 4, "value": 100_000})
67
+ actual = df.groupby("key")["value"].cumprod()
68
+ expected = Series(
69
+ [100_000, 10_000_000_000, 1_000_000_000_000_000, 7766279631452241920],
70
+ name="value",
71
+ )
72
+ tm.assert_series_equal(actual, expected)
73
+
74
+ numpy_result = df.groupby("key", group_keys=False)["value"].apply(
75
+ lambda x: x.cumprod()
76
+ )
77
+ numpy_result.name = "value"
78
+ tm.assert_series_equal(actual, numpy_result)
79
+
80
+
81
+ def test_groupby_cumprod_nan_influences_other_columns():
82
+ # GH#48064
83
+ df = DataFrame(
84
+ {
85
+ "a": 1,
86
+ "b": [1, np.nan, 2],
87
+ "c": [1, 2, 3.0],
88
+ }
89
+ )
90
+ result = df.groupby("a").cumprod(numeric_only=True, skipna=False)
91
+ expected = DataFrame({"b": [1, np.nan, np.nan], "c": [1, 2, 6.0]})
92
+ tm.assert_frame_equal(result, expected)
93
+
94
+
95
+ def test_cummin(dtypes_for_minmax):
96
+ dtype = dtypes_for_minmax[0]
97
+ min_val = dtypes_for_minmax[1]
98
+
99
+ # GH 15048
100
+ base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]})
101
+ expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
102
+
103
+ df = base_df.astype(dtype)
104
+
105
+ expected = DataFrame({"B": expected_mins}).astype(dtype)
106
+ result = df.groupby("A").cummin()
107
+ tm.assert_frame_equal(result, expected)
108
+ result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
109
+ tm.assert_frame_equal(result, expected)
110
+
111
+ # Test w/ min value for dtype
112
+ df.loc[[2, 6], "B"] = min_val
113
+ df.loc[[1, 5], "B"] = min_val + 1
114
+ expected.loc[[2, 3, 6, 7], "B"] = min_val
115
+ expected.loc[[1, 5], "B"] = min_val + 1 # should not be rounded to min_val
116
+ result = df.groupby("A").cummin()
117
+ tm.assert_frame_equal(result, expected, check_exact=True)
118
+ expected = (
119
+ df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
120
+ )
121
+ tm.assert_frame_equal(result, expected, check_exact=True)
122
+
123
+ # Test nan in some values
124
+ # Explicit cast to float to avoid implicit cast when setting nan
125
+ base_df = base_df.astype({"B": "float"})
126
+ base_df.loc[[0, 2, 4, 6], "B"] = np.nan
127
+ expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
128
+ result = base_df.groupby("A").cummin()
129
+ tm.assert_frame_equal(result, expected)
130
+ expected = (
131
+ base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
132
+ )
133
+ tm.assert_frame_equal(result, expected)
134
+
135
+ # GH 15561
136
+ df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])})
137
+ expected = Series(pd.to_datetime("2001"), index=[0], name="b")
138
+
139
+ result = df.groupby("a")["b"].cummin()
140
+ tm.assert_series_equal(expected, result)
141
+
142
+ # GH 15635
143
+ df = DataFrame({"a": [1, 2, 1], "b": [1, 2, 2]})
144
+ result = df.groupby("a").b.cummin()
145
+ expected = Series([1, 2, 1], name="b")
146
+ tm.assert_series_equal(result, expected)
147
+
148
+
149
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
150
+ @pytest.mark.parametrize("dtype", ["UInt64", "Int64", "Float64", "float", "boolean"])
151
+ def test_cummin_max_all_nan_column(method, dtype):
152
+ base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
153
+ base_df["B"] = base_df["B"].astype(dtype)
154
+ grouped = base_df.groupby("A")
155
+
156
+ expected = DataFrame({"B": [np.nan] * 8}, dtype=dtype)
157
+ result = getattr(grouped, method)()
158
+ tm.assert_frame_equal(expected, result)
159
+
160
+ result = getattr(grouped["B"], method)().to_frame()
161
+ tm.assert_frame_equal(expected, result)
162
+
163
+
164
+ def test_cummax(dtypes_for_minmax):
165
+ dtype = dtypes_for_minmax[0]
166
+ max_val = dtypes_for_minmax[2]
167
+
168
+ # GH 15048
169
+ base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]})
170
+ expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
171
+
172
+ df = base_df.astype(dtype)
173
+
174
+ expected = DataFrame({"B": expected_maxs}).astype(dtype)
175
+ result = df.groupby("A").cummax()
176
+ tm.assert_frame_equal(result, expected)
177
+ result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
178
+ tm.assert_frame_equal(result, expected)
179
+
180
+ # Test w/ max value for dtype
181
+ df.loc[[2, 6], "B"] = max_val
182
+ expected.loc[[2, 3, 6, 7], "B"] = max_val
183
+ result = df.groupby("A").cummax()
184
+ tm.assert_frame_equal(result, expected)
185
+ expected = (
186
+ df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
187
+ )
188
+ tm.assert_frame_equal(result, expected)
189
+
190
+ # Test nan in some values
191
+ # Explicit cast to float to avoid implicit cast when setting nan
192
+ base_df = base_df.astype({"B": "float"})
193
+ base_df.loc[[0, 2, 4, 6], "B"] = np.nan
194
+ expected = DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
195
+ result = base_df.groupby("A").cummax()
196
+ tm.assert_frame_equal(result, expected)
197
+ expected = (
198
+ base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
199
+ )
200
+ tm.assert_frame_equal(result, expected)
201
+
202
+ # GH 15561
203
+ df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])})
204
+ expected = Series(pd.to_datetime("2001"), index=[0], name="b")
205
+
206
+ result = df.groupby("a")["b"].cummax()
207
+ tm.assert_series_equal(expected, result)
208
+
209
+ # GH 15635
210
+ df = DataFrame({"a": [1, 2, 1], "b": [2, 1, 1]})
211
+ result = df.groupby("a").b.cummax()
212
+ expected = Series([2, 1, 2], name="b")
213
+ tm.assert_series_equal(result, expected)
214
+
215
+
216
+ def test_cummax_i8_at_implementation_bound():
217
+ # the minimum value used to be treated as NPY_NAT+1 instead of NPY_NAT
218
+ # for int64 dtype GH#46382
219
+ ser = Series([pd.NaT._value + n for n in range(5)])
220
+ df = DataFrame({"A": 1, "B": ser, "C": ser._values.view("M8[ns]")})
221
+ gb = df.groupby("A")
222
+
223
+ res = gb.cummax()
224
+ exp = df[["B", "C"]]
225
+ tm.assert_frame_equal(res, exp)
226
+
227
+
228
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
229
+ @pytest.mark.parametrize("dtype", ["float", "Int64", "Float64"])
230
+ @pytest.mark.parametrize(
231
+ "groups,expected_data",
232
+ [
233
+ ([1, 1, 1], [1, None, None]),
234
+ ([1, 2, 3], [1, None, 2]),
235
+ ([1, 3, 3], [1, None, None]),
236
+ ],
237
+ )
238
+ def test_cummin_max_skipna(method, dtype, groups, expected_data):
239
+ # GH-34047
240
+ df = DataFrame({"a": Series([1, None, 2], dtype=dtype)})
241
+ orig = df.copy()
242
+ gb = df.groupby(groups)["a"]
243
+
244
+ result = getattr(gb, method)(skipna=False)
245
+ expected = Series(expected_data, dtype=dtype, name="a")
246
+
247
+ # check we didn't accidentally alter df
248
+ tm.assert_frame_equal(df, orig)
249
+
250
+ tm.assert_series_equal(result, expected)
251
+
252
+
253
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
254
+ def test_cummin_max_skipna_multiple_cols(method):
255
+ # Ensure missing value in "a" doesn't cause "b" to be nan-filled
256
+ df = DataFrame({"a": [np.nan, 2.0, 2.0], "b": [2.0, 2.0, 2.0]})
257
+ gb = df.groupby([1, 1, 1])[["a", "b"]]
258
+
259
+ result = getattr(gb, method)(skipna=False)
260
+ expected = DataFrame({"a": [np.nan, np.nan, np.nan], "b": [2.0, 2.0, 2.0]})
261
+
262
+ tm.assert_frame_equal(result, expected)
263
+
264
+
265
+ @pytest.mark.parametrize("func", ["cumprod", "cumsum"])
266
+ def test_numpy_compat(func):
267
+ # see gh-12811
268
+ df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
269
+ g = df.groupby("A")
270
+
271
+ msg = "numpy operations are not valid with groupby"
272
+
273
+ with pytest.raises(UnsupportedFunctionCall, match=msg):
274
+ getattr(g, func)(1, 2, 3)
275
+ with pytest.raises(UnsupportedFunctionCall, match=msg):
276
+ getattr(g, func)(foo=1)
277
+
278
+
279
+ @td.skip_if_32bit
280
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
281
+ @pytest.mark.parametrize(
282
+ "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2**53 + 1)]
283
+ )
284
+ def test_nullable_int_not_cast_as_float(method, dtype, val):
285
+ data = [val, pd.NA]
286
+ df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype)
287
+ grouped = df.groupby("grp")
288
+
289
+ result = grouped.transform(method)
290
+ expected = DataFrame({"b": data}, dtype=dtype)
291
+
292
+ tm.assert_frame_equal(result, expected)
293
+
294
+
295
+ def test_cython_api2():
296
+ # this takes the fast apply path
297
+
298
+ # cumsum (GH5614)
299
+ df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
300
+ expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
301
+ result = df.groupby("A").cumsum()
302
+ tm.assert_frame_equal(result, expected)
303
+
304
+ # GH 5755 - cumsum is a transformer and should ignore as_index
305
+ result = df.groupby("A", as_index=False).cumsum()
306
+ tm.assert_frame_equal(result, expected)
307
+
308
+ # GH 13994
309
+ msg = "DataFrameGroupBy.cumsum with axis=1 is deprecated"
310
+ with tm.assert_produces_warning(FutureWarning, match=msg):
311
+ result = df.groupby("A").cumsum(axis=1)
312
+ expected = df.cumsum(axis=1)
313
+ tm.assert_frame_equal(result, expected)
314
+
315
+ msg = "DataFrameGroupBy.cumprod with axis=1 is deprecated"
316
+ with tm.assert_produces_warning(FutureWarning, match=msg):
317
+ result = df.groupby("A").cumprod(axis=1)
318
+ expected = df.cumprod(axis=1)
319
+ tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_filters.py ADDED
@@ -0,0 +1,636 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from string import ascii_lowercase
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ DataFrame,
9
+ Series,
10
+ Timestamp,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+
15
+ def test_filter_series():
16
+ s = Series([1, 3, 20, 5, 22, 24, 7])
17
+ expected_odd = Series([1, 3, 5, 7], index=[0, 1, 3, 6])
18
+ expected_even = Series([20, 22, 24], index=[2, 4, 5])
19
+ grouper = s.apply(lambda x: x % 2)
20
+ grouped = s.groupby(grouper)
21
+ tm.assert_series_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
22
+ tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
23
+ # Test dropna=False.
24
+ tm.assert_series_equal(
25
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
26
+ expected_odd.reindex(s.index),
27
+ )
28
+ tm.assert_series_equal(
29
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
30
+ expected_even.reindex(s.index),
31
+ )
32
+
33
+
34
+ def test_filter_single_column_df():
35
+ df = DataFrame([1, 3, 20, 5, 22, 24, 7])
36
+ expected_odd = DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
37
+ expected_even = DataFrame([20, 22, 24], index=[2, 4, 5])
38
+ grouper = df[0].apply(lambda x: x % 2)
39
+ grouped = df.groupby(grouper)
40
+ tm.assert_frame_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
41
+ tm.assert_frame_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
42
+ # Test dropna=False.
43
+ tm.assert_frame_equal(
44
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
45
+ expected_odd.reindex(df.index),
46
+ )
47
+ tm.assert_frame_equal(
48
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
49
+ expected_even.reindex(df.index),
50
+ )
51
+
52
+
53
+ def test_filter_multi_column_df():
54
+ df = DataFrame({"A": [1, 12, 12, 1], "B": [1, 1, 1, 1]})
55
+ grouper = df["A"].apply(lambda x: x % 2)
56
+ grouped = df.groupby(grouper)
57
+ expected = DataFrame({"A": [12, 12], "B": [1, 1]}, index=[1, 2])
58
+ tm.assert_frame_equal(
59
+ grouped.filter(lambda x: x["A"].sum() - x["B"].sum() > 10), expected
60
+ )
61
+
62
+
63
+ def test_filter_mixed_df():
64
+ df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
65
+ grouper = df["A"].apply(lambda x: x % 2)
66
+ grouped = df.groupby(grouper)
67
+ expected = DataFrame({"A": [12, 12], "B": ["b", "c"]}, index=[1, 2])
68
+ tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 10), expected)
69
+
70
+
71
+ def test_filter_out_all_groups():
72
+ s = Series([1, 3, 20, 5, 22, 24, 7])
73
+ grouper = s.apply(lambda x: x % 2)
74
+ grouped = s.groupby(grouper)
75
+ tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
76
+ df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
77
+ grouper = df["A"].apply(lambda x: x % 2)
78
+ grouped = df.groupby(grouper)
79
+ tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 1000), df.loc[[]])
80
+
81
+
82
+ def test_filter_out_no_groups():
83
+ s = Series([1, 3, 20, 5, 22, 24, 7])
84
+ grouper = s.apply(lambda x: x % 2)
85
+ grouped = s.groupby(grouper)
86
+ filtered = grouped.filter(lambda x: x.mean() > 0)
87
+ tm.assert_series_equal(filtered, s)
88
+ df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
89
+ grouper = df["A"].apply(lambda x: x % 2)
90
+ grouped = df.groupby(grouper)
91
+ filtered = grouped.filter(lambda x: x["A"].mean() > 0)
92
+ tm.assert_frame_equal(filtered, df)
93
+
94
+
95
+ def test_filter_out_all_groups_in_df():
96
+ # GH12768
97
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
98
+ res = df.groupby("a")
99
+ res = res.filter(lambda x: x["b"].sum() > 5, dropna=False)
100
+ expected = DataFrame({"a": [np.nan] * 3, "b": [np.nan] * 3})
101
+ tm.assert_frame_equal(expected, res)
102
+
103
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
104
+ res = df.groupby("a")
105
+ res = res.filter(lambda x: x["b"].sum() > 5, dropna=True)
106
+ expected = DataFrame({"a": [], "b": []}, dtype="int64")
107
+ tm.assert_frame_equal(expected, res)
108
+
109
+
110
+ def test_filter_condition_raises():
111
+ def raise_if_sum_is_zero(x):
112
+ if x.sum() == 0:
113
+ raise ValueError
114
+ return x.sum() > 0
115
+
116
+ s = Series([-1, 0, 1, 2])
117
+ grouper = s.apply(lambda x: x % 2)
118
+ grouped = s.groupby(grouper)
119
+ msg = "the filter must return a boolean result"
120
+ with pytest.raises(TypeError, match=msg):
121
+ grouped.filter(raise_if_sum_is_zero)
122
+
123
+
124
+ def test_filter_with_axis_in_groupby():
125
+ # issue 11041
126
+ index = pd.MultiIndex.from_product([range(10), [0, 1]])
127
+ data = DataFrame(np.arange(100).reshape(-1, 20), columns=index, dtype="int64")
128
+
129
+ msg = "DataFrame.groupby with axis=1"
130
+ with tm.assert_produces_warning(FutureWarning, match=msg):
131
+ gb = data.groupby(level=0, axis=1)
132
+ result = gb.filter(lambda x: x.iloc[0, 0] > 10)
133
+ expected = data.iloc[:, 12:20]
134
+ tm.assert_frame_equal(result, expected)
135
+
136
+
137
+ def test_filter_bad_shapes():
138
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
139
+ s = df["B"]
140
+ g_df = df.groupby("B")
141
+ g_s = s.groupby(s)
142
+
143
+ f = lambda x: x
144
+ msg = "filter function returned a DataFrame, but expected a scalar bool"
145
+ with pytest.raises(TypeError, match=msg):
146
+ g_df.filter(f)
147
+ msg = "the filter must return a boolean result"
148
+ with pytest.raises(TypeError, match=msg):
149
+ g_s.filter(f)
150
+
151
+ f = lambda x: x == 1
152
+ msg = "filter function returned a DataFrame, but expected a scalar bool"
153
+ with pytest.raises(TypeError, match=msg):
154
+ g_df.filter(f)
155
+ msg = "the filter must return a boolean result"
156
+ with pytest.raises(TypeError, match=msg):
157
+ g_s.filter(f)
158
+
159
+ f = lambda x: np.outer(x, x)
160
+ msg = "can't multiply sequence by non-int of type 'str'"
161
+ with pytest.raises(TypeError, match=msg):
162
+ g_df.filter(f)
163
+ msg = "the filter must return a boolean result"
164
+ with pytest.raises(TypeError, match=msg):
165
+ g_s.filter(f)
166
+
167
+
168
+ def test_filter_nan_is_false():
169
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
170
+ s = df["B"]
171
+ g_df = df.groupby(df["B"])
172
+ g_s = s.groupby(s)
173
+
174
+ f = lambda x: np.nan
175
+ tm.assert_frame_equal(g_df.filter(f), df.loc[[]])
176
+ tm.assert_series_equal(g_s.filter(f), s[[]])
177
+
178
+
179
+ def test_filter_pdna_is_false():
180
+ # in particular, dont raise in filter trying to call bool(pd.NA)
181
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
182
+ ser = df["B"]
183
+ g_df = df.groupby(df["B"])
184
+ g_s = ser.groupby(ser)
185
+
186
+ func = lambda x: pd.NA
187
+ res = g_df.filter(func)
188
+ tm.assert_frame_equal(res, df.loc[[]])
189
+ res = g_s.filter(func)
190
+ tm.assert_series_equal(res, ser[[]])
191
+
192
+
193
+ def test_filter_against_workaround_ints():
194
+ # Series of ints
195
+ s = Series(np.random.default_rng(2).integers(0, 100, 100))
196
+ grouper = s.apply(lambda x: np.round(x, -1))
197
+ grouped = s.groupby(grouper)
198
+ f = lambda x: x.mean() > 10
199
+
200
+ old_way = s[grouped.transform(f).astype("bool")]
201
+ new_way = grouped.filter(f)
202
+ tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
203
+
204
+
205
+ def test_filter_against_workaround_floats():
206
+ # Series of floats
207
+ s = 100 * Series(np.random.default_rng(2).random(100))
208
+ grouper = s.apply(lambda x: np.round(x, -1))
209
+ grouped = s.groupby(grouper)
210
+ f = lambda x: x.mean() > 10
211
+ old_way = s[grouped.transform(f).astype("bool")]
212
+ new_way = grouped.filter(f)
213
+ tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
214
+
215
+
216
+ def test_filter_against_workaround_dataframe():
217
+ # Set up DataFrame of ints, floats, strings.
218
+ letters = np.array(list(ascii_lowercase))
219
+ N = 100
220
+ random_letters = letters.take(
221
+ np.random.default_rng(2).integers(0, 26, N, dtype=int)
222
+ )
223
+ df = DataFrame(
224
+ {
225
+ "ints": Series(np.random.default_rng(2).integers(0, 100, N)),
226
+ "floats": N / 10 * Series(np.random.default_rng(2).random(N)),
227
+ "letters": Series(random_letters),
228
+ }
229
+ )
230
+
231
+ # Group by ints; filter on floats.
232
+ grouped = df.groupby("ints")
233
+ old_way = df[grouped.floats.transform(lambda x: x.mean() > N / 20).astype("bool")]
234
+ new_way = grouped.filter(lambda x: x["floats"].mean() > N / 20)
235
+ tm.assert_frame_equal(new_way, old_way)
236
+
237
+ # Group by floats (rounded); filter on strings.
238
+ grouper = df.floats.apply(lambda x: np.round(x, -1))
239
+ grouped = df.groupby(grouper)
240
+ old_way = df[grouped.letters.transform(lambda x: len(x) < N / 10).astype("bool")]
241
+ new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
242
+ tm.assert_frame_equal(new_way, old_way)
243
+
244
+ # Group by strings; filter on ints.
245
+ grouped = df.groupby("letters")
246
+ old_way = df[grouped.ints.transform(lambda x: x.mean() > N / 20).astype("bool")]
247
+ new_way = grouped.filter(lambda x: x["ints"].mean() > N / 20)
248
+ tm.assert_frame_equal(new_way, old_way)
249
+
250
+
251
+ def test_filter_using_len():
252
+ # BUG GH4447
253
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
254
+ grouped = df.groupby("B")
255
+ actual = grouped.filter(lambda x: len(x) > 2)
256
+ expected = DataFrame(
257
+ {"A": np.arange(2, 6), "B": list("bbbb"), "C": np.arange(2, 6)},
258
+ index=np.arange(2, 6, dtype=np.int64),
259
+ )
260
+ tm.assert_frame_equal(actual, expected)
261
+
262
+ actual = grouped.filter(lambda x: len(x) > 4)
263
+ expected = df.loc[[]]
264
+ tm.assert_frame_equal(actual, expected)
265
+
266
+ # Series have always worked properly, but we'll test anyway.
267
+ s = df["B"]
268
+ grouped = s.groupby(s)
269
+ actual = grouped.filter(lambda x: len(x) > 2)
270
+ expected = Series(4 * ["b"], index=np.arange(2, 6, dtype=np.int64), name="B")
271
+ tm.assert_series_equal(actual, expected)
272
+
273
+ actual = grouped.filter(lambda x: len(x) > 4)
274
+ expected = s[[]]
275
+ tm.assert_series_equal(actual, expected)
276
+
277
+
278
+ def test_filter_maintains_ordering():
279
+ # Simple case: index is sequential. #4621
280
+ df = DataFrame(
281
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}
282
+ )
283
+ s = df["pid"]
284
+ grouped = df.groupby("tag")
285
+ actual = grouped.filter(lambda x: len(x) > 1)
286
+ expected = df.iloc[[1, 2, 4, 7]]
287
+ tm.assert_frame_equal(actual, expected)
288
+
289
+ grouped = s.groupby(df["tag"])
290
+ actual = grouped.filter(lambda x: len(x) > 1)
291
+ expected = s.iloc[[1, 2, 4, 7]]
292
+ tm.assert_series_equal(actual, expected)
293
+
294
+ # Now index is sequentially decreasing.
295
+ df.index = np.arange(len(df) - 1, -1, -1)
296
+ s = df["pid"]
297
+ grouped = df.groupby("tag")
298
+ actual = grouped.filter(lambda x: len(x) > 1)
299
+ expected = df.iloc[[1, 2, 4, 7]]
300
+ tm.assert_frame_equal(actual, expected)
301
+
302
+ grouped = s.groupby(df["tag"])
303
+ actual = grouped.filter(lambda x: len(x) > 1)
304
+ expected = s.iloc[[1, 2, 4, 7]]
305
+ tm.assert_series_equal(actual, expected)
306
+
307
+ # Index is shuffled.
308
+ SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
309
+ df.index = df.index[SHUFFLED]
310
+ s = df["pid"]
311
+ grouped = df.groupby("tag")
312
+ actual = grouped.filter(lambda x: len(x) > 1)
313
+ expected = df.iloc[[1, 2, 4, 7]]
314
+ tm.assert_frame_equal(actual, expected)
315
+
316
+ grouped = s.groupby(df["tag"])
317
+ actual = grouped.filter(lambda x: len(x) > 1)
318
+ expected = s.iloc[[1, 2, 4, 7]]
319
+ tm.assert_series_equal(actual, expected)
320
+
321
+
322
+ def test_filter_multiple_timestamp():
323
+ # GH 10114
324
+ df = DataFrame(
325
+ {
326
+ "A": np.arange(5, dtype="int64"),
327
+ "B": ["foo", "bar", "foo", "bar", "bar"],
328
+ "C": Timestamp("20130101"),
329
+ }
330
+ )
331
+
332
+ grouped = df.groupby(["B", "C"])
333
+
334
+ result = grouped["A"].filter(lambda x: True)
335
+ tm.assert_series_equal(df["A"], result)
336
+
337
+ result = grouped["A"].transform(len)
338
+ expected = Series([2, 3, 2, 3, 3], name="A")
339
+ tm.assert_series_equal(result, expected)
340
+
341
+ result = grouped.filter(lambda x: True)
342
+ tm.assert_frame_equal(df, result)
343
+
344
+ result = grouped.transform("sum")
345
+ expected = DataFrame({"A": [2, 8, 2, 8, 8]})
346
+ tm.assert_frame_equal(result, expected)
347
+
348
+ result = grouped.transform(len)
349
+ expected = DataFrame({"A": [2, 3, 2, 3, 3]})
350
+ tm.assert_frame_equal(result, expected)
351
+
352
+
353
+ def test_filter_and_transform_with_non_unique_int_index():
354
+ # GH4620
355
+ index = [1, 1, 1, 2, 1, 1, 0, 1]
356
+ df = DataFrame(
357
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
358
+ index=index,
359
+ )
360
+ grouped_df = df.groupby("tag")
361
+ ser = df["pid"]
362
+ grouped_ser = ser.groupby(df["tag"])
363
+ expected_indexes = [1, 2, 4, 7]
364
+
365
+ # Filter DataFrame
366
+ actual = grouped_df.filter(lambda x: len(x) > 1)
367
+ expected = df.iloc[expected_indexes]
368
+ tm.assert_frame_equal(actual, expected)
369
+
370
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
371
+ # Cast to avoid upcast when setting nan below
372
+ expected = df.copy().astype("float64")
373
+ expected.iloc[[0, 3, 5, 6]] = np.nan
374
+ tm.assert_frame_equal(actual, expected)
375
+
376
+ # Filter Series
377
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
378
+ expected = ser.take(expected_indexes)
379
+ tm.assert_series_equal(actual, expected)
380
+
381
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
382
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
383
+ # ^ made manually because this can get confusing!
384
+ tm.assert_series_equal(actual, expected)
385
+
386
+ # Transform Series
387
+ actual = grouped_ser.transform(len)
388
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
389
+ tm.assert_series_equal(actual, expected)
390
+
391
+ # Transform (a column from) DataFrameGroupBy
392
+ actual = grouped_df.pid.transform(len)
393
+ tm.assert_series_equal(actual, expected)
394
+
395
+
396
+ def test_filter_and_transform_with_multiple_non_unique_int_index():
397
+ # GH4620
398
+ index = [1, 1, 1, 2, 0, 0, 0, 1]
399
+ df = DataFrame(
400
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
401
+ index=index,
402
+ )
403
+ grouped_df = df.groupby("tag")
404
+ ser = df["pid"]
405
+ grouped_ser = ser.groupby(df["tag"])
406
+ expected_indexes = [1, 2, 4, 7]
407
+
408
+ # Filter DataFrame
409
+ actual = grouped_df.filter(lambda x: len(x) > 1)
410
+ expected = df.iloc[expected_indexes]
411
+ tm.assert_frame_equal(actual, expected)
412
+
413
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
414
+ # Cast to avoid upcast when setting nan below
415
+ expected = df.copy().astype("float64")
416
+ expected.iloc[[0, 3, 5, 6]] = np.nan
417
+ tm.assert_frame_equal(actual, expected)
418
+
419
+ # Filter Series
420
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
421
+ expected = ser.take(expected_indexes)
422
+ tm.assert_series_equal(actual, expected)
423
+
424
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
425
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
426
+ # ^ made manually because this can get confusing!
427
+ tm.assert_series_equal(actual, expected)
428
+
429
+ # Transform Series
430
+ actual = grouped_ser.transform(len)
431
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
432
+ tm.assert_series_equal(actual, expected)
433
+
434
+ # Transform (a column from) DataFrameGroupBy
435
+ actual = grouped_df.pid.transform(len)
436
+ tm.assert_series_equal(actual, expected)
437
+
438
+
439
+ def test_filter_and_transform_with_non_unique_float_index():
440
+ # GH4620
441
+ index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
442
+ df = DataFrame(
443
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
444
+ index=index,
445
+ )
446
+ grouped_df = df.groupby("tag")
447
+ ser = df["pid"]
448
+ grouped_ser = ser.groupby(df["tag"])
449
+ expected_indexes = [1, 2, 4, 7]
450
+
451
+ # Filter DataFrame
452
+ actual = grouped_df.filter(lambda x: len(x) > 1)
453
+ expected = df.iloc[expected_indexes]
454
+ tm.assert_frame_equal(actual, expected)
455
+
456
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
457
+ # Cast to avoid upcast when setting nan below
458
+ expected = df.copy().astype("float64")
459
+ expected.iloc[[0, 3, 5, 6]] = np.nan
460
+ tm.assert_frame_equal(actual, expected)
461
+
462
+ # Filter Series
463
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
464
+ expected = ser.take(expected_indexes)
465
+ tm.assert_series_equal(actual, expected)
466
+
467
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
468
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
469
+ # ^ made manually because this can get confusing!
470
+ tm.assert_series_equal(actual, expected)
471
+
472
+ # Transform Series
473
+ actual = grouped_ser.transform(len)
474
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
475
+ tm.assert_series_equal(actual, expected)
476
+
477
+ # Transform (a column from) DataFrameGroupBy
478
+ actual = grouped_df.pid.transform(len)
479
+ tm.assert_series_equal(actual, expected)
480
+
481
+
482
+ def test_filter_and_transform_with_non_unique_timestamp_index():
483
+ # GH4620
484
+ t0 = Timestamp("2013-09-30 00:05:00")
485
+ t1 = Timestamp("2013-10-30 00:05:00")
486
+ t2 = Timestamp("2013-11-30 00:05:00")
487
+ index = [t1, t1, t1, t2, t1, t1, t0, t1]
488
+ df = DataFrame(
489
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
490
+ index=index,
491
+ )
492
+ grouped_df = df.groupby("tag")
493
+ ser = df["pid"]
494
+ grouped_ser = ser.groupby(df["tag"])
495
+ expected_indexes = [1, 2, 4, 7]
496
+
497
+ # Filter DataFrame
498
+ actual = grouped_df.filter(lambda x: len(x) > 1)
499
+ expected = df.iloc[expected_indexes]
500
+ tm.assert_frame_equal(actual, expected)
501
+
502
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
503
+ # Cast to avoid upcast when setting nan below
504
+ expected = df.copy().astype("float64")
505
+ expected.iloc[[0, 3, 5, 6]] = np.nan
506
+ tm.assert_frame_equal(actual, expected)
507
+
508
+ # Filter Series
509
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
510
+ expected = ser.take(expected_indexes)
511
+ tm.assert_series_equal(actual, expected)
512
+
513
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
514
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
515
+ # ^ made manually because this can get confusing!
516
+ tm.assert_series_equal(actual, expected)
517
+
518
+ # Transform Series
519
+ actual = grouped_ser.transform(len)
520
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
521
+ tm.assert_series_equal(actual, expected)
522
+
523
+ # Transform (a column from) DataFrameGroupBy
524
+ actual = grouped_df.pid.transform(len)
525
+ tm.assert_series_equal(actual, expected)
526
+
527
+
528
+ def test_filter_and_transform_with_non_unique_string_index():
529
+ # GH4620
530
+ index = list("bbbcbbab")
531
+ df = DataFrame(
532
+ {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
533
+ index=index,
534
+ )
535
+ grouped_df = df.groupby("tag")
536
+ ser = df["pid"]
537
+ grouped_ser = ser.groupby(df["tag"])
538
+ expected_indexes = [1, 2, 4, 7]
539
+
540
+ # Filter DataFrame
541
+ actual = grouped_df.filter(lambda x: len(x) > 1)
542
+ expected = df.iloc[expected_indexes]
543
+ tm.assert_frame_equal(actual, expected)
544
+
545
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
546
+ # Cast to avoid upcast when setting nan below
547
+ expected = df.copy().astype("float64")
548
+ expected.iloc[[0, 3, 5, 6]] = np.nan
549
+ tm.assert_frame_equal(actual, expected)
550
+
551
+ # Filter Series
552
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
553
+ expected = ser.take(expected_indexes)
554
+ tm.assert_series_equal(actual, expected)
555
+
556
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
557
+ expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid")
558
+ # ^ made manually because this can get confusing!
559
+ tm.assert_series_equal(actual, expected)
560
+
561
+ # Transform Series
562
+ actual = grouped_ser.transform(len)
563
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
564
+ tm.assert_series_equal(actual, expected)
565
+
566
+ # Transform (a column from) DataFrameGroupBy
567
+ actual = grouped_df.pid.transform(len)
568
+ tm.assert_series_equal(actual, expected)
569
+
570
+
571
+ def test_filter_has_access_to_grouped_cols():
572
+ df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=["A", "B"])
573
+ g = df.groupby("A")
574
+ # previously didn't have access to col A #????
575
+ filt = g.filter(lambda x: x["A"].sum() == 2)
576
+ tm.assert_frame_equal(filt, df.iloc[[0, 1]])
577
+
578
+
579
+ def test_filter_enforces_scalarness():
580
+ df = DataFrame(
581
+ [
582
+ ["best", "a", "x"],
583
+ ["worst", "b", "y"],
584
+ ["best", "c", "x"],
585
+ ["best", "d", "y"],
586
+ ["worst", "d", "y"],
587
+ ["worst", "d", "y"],
588
+ ["best", "d", "z"],
589
+ ],
590
+ columns=["a", "b", "c"],
591
+ )
592
+ with pytest.raises(TypeError, match="filter function returned a.*"):
593
+ df.groupby("c").filter(lambda g: g["a"] == "best")
594
+
595
+
596
+ def test_filter_non_bool_raises():
597
+ df = DataFrame(
598
+ [
599
+ ["best", "a", 1],
600
+ ["worst", "b", 1],
601
+ ["best", "c", 1],
602
+ ["best", "d", 1],
603
+ ["worst", "d", 1],
604
+ ["worst", "d", 1],
605
+ ["best", "d", 1],
606
+ ],
607
+ columns=["a", "b", "c"],
608
+ )
609
+ with pytest.raises(TypeError, match="filter function returned a.*"):
610
+ df.groupby("a").filter(lambda g: g.c.mean())
611
+
612
+
613
+ def test_filter_dropna_with_empty_groups():
614
+ # GH 10780
615
+ data = Series(np.random.default_rng(2).random(9), index=np.repeat([1, 2, 3], 3))
616
+ grouped = data.groupby(level=0)
617
+ result_false = grouped.filter(lambda x: x.mean() > 1, dropna=False)
618
+ expected_false = Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3))
619
+ tm.assert_series_equal(result_false, expected_false)
620
+
621
+ result_true = grouped.filter(lambda x: x.mean() > 1, dropna=True)
622
+ expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64)
623
+ tm.assert_series_equal(result_true, expected_true)
624
+
625
+
626
+ def test_filter_consistent_result_before_after_agg_func():
627
+ # GH 17091
628
+ df = DataFrame({"data": range(6), "key": list("ABCABC")})
629
+ grouper = df.groupby("key")
630
+ result = grouper.filter(lambda x: True)
631
+ expected = DataFrame({"data": range(6), "key": list("ABCABC")})
632
+ tm.assert_frame_equal(result, expected)
633
+
634
+ grouper.sum()
635
+ result = grouper.filter(lambda x: True)
636
+ tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_dropna.py ADDED
@@ -0,0 +1,696 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.compat.pyarrow import pa_version_under10p1
5
+
6
+ from pandas.core.dtypes.missing import na_value_for_dtype
7
+
8
+ import pandas as pd
9
+ import pandas._testing as tm
10
+ from pandas.tests.groupby import get_groupby_method_args
11
+
12
+
13
+ @pytest.mark.parametrize(
14
+ "dropna, tuples, outputs",
15
+ [
16
+ (
17
+ True,
18
+ [["A", "B"], ["B", "A"]],
19
+ {"c": [13.0, 123.23], "d": [13.0, 123.0], "e": [13.0, 1.0]},
20
+ ),
21
+ (
22
+ False,
23
+ [["A", "B"], ["A", np.nan], ["B", "A"]],
24
+ {
25
+ "c": [13.0, 12.3, 123.23],
26
+ "d": [13.0, 233.0, 123.0],
27
+ "e": [13.0, 12.0, 1.0],
28
+ },
29
+ ),
30
+ ],
31
+ )
32
+ def test_groupby_dropna_multi_index_dataframe_nan_in_one_group(
33
+ dropna, tuples, outputs, nulls_fixture
34
+ ):
35
+ # GH 3729 this is to test that NA is in one group
36
+ df_list = [
37
+ ["A", "B", 12, 12, 12],
38
+ ["A", nulls_fixture, 12.3, 233.0, 12],
39
+ ["B", "A", 123.23, 123, 1],
40
+ ["A", "B", 1, 1, 1.0],
41
+ ]
42
+ df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
43
+ grouped = df.groupby(["a", "b"], dropna=dropna).sum()
44
+
45
+ mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
46
+
47
+ # Since right now, by default MI will drop NA from levels when we create MI
48
+ # via `from_*`, so we need to add NA for level manually afterwards.
49
+ if not dropna:
50
+ mi = mi.set_levels(["A", "B", np.nan], level="b")
51
+ expected = pd.DataFrame(outputs, index=mi)
52
+
53
+ tm.assert_frame_equal(grouped, expected)
54
+
55
+
56
+ @pytest.mark.parametrize(
57
+ "dropna, tuples, outputs",
58
+ [
59
+ (
60
+ True,
61
+ [["A", "B"], ["B", "A"]],
62
+ {"c": [12.0, 123.23], "d": [12.0, 123.0], "e": [12.0, 1.0]},
63
+ ),
64
+ (
65
+ False,
66
+ [["A", "B"], ["A", np.nan], ["B", "A"], [np.nan, "B"]],
67
+ {
68
+ "c": [12.0, 13.3, 123.23, 1.0],
69
+ "d": [12.0, 234.0, 123.0, 1.0],
70
+ "e": [12.0, 13.0, 1.0, 1.0],
71
+ },
72
+ ),
73
+ ],
74
+ )
75
+ def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups(
76
+ dropna, tuples, outputs, nulls_fixture, nulls_fixture2
77
+ ):
78
+ # GH 3729 this is to test that NA in different groups with different representations
79
+ df_list = [
80
+ ["A", "B", 12, 12, 12],
81
+ ["A", nulls_fixture, 12.3, 233.0, 12],
82
+ ["B", "A", 123.23, 123, 1],
83
+ [nulls_fixture2, "B", 1, 1, 1.0],
84
+ ["A", nulls_fixture2, 1, 1, 1.0],
85
+ ]
86
+ df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
87
+ grouped = df.groupby(["a", "b"], dropna=dropna).sum()
88
+
89
+ mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
90
+
91
+ # Since right now, by default MI will drop NA from levels when we create MI
92
+ # via `from_*`, so we need to add NA for level manually afterwards.
93
+ if not dropna:
94
+ mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]])
95
+ expected = pd.DataFrame(outputs, index=mi)
96
+
97
+ tm.assert_frame_equal(grouped, expected)
98
+
99
+
100
+ @pytest.mark.parametrize(
101
+ "dropna, idx, outputs",
102
+ [
103
+ (True, ["A", "B"], {"b": [123.23, 13.0], "c": [123.0, 13.0], "d": [1.0, 13.0]}),
104
+ (
105
+ False,
106
+ ["A", "B", np.nan],
107
+ {
108
+ "b": [123.23, 13.0, 12.3],
109
+ "c": [123.0, 13.0, 233.0],
110
+ "d": [1.0, 13.0, 12.0],
111
+ },
112
+ ),
113
+ ],
114
+ )
115
+ def test_groupby_dropna_normal_index_dataframe(dropna, idx, outputs):
116
+ # GH 3729
117
+ df_list = [
118
+ ["B", 12, 12, 12],
119
+ [None, 12.3, 233.0, 12],
120
+ ["A", 123.23, 123, 1],
121
+ ["B", 1, 1, 1.0],
122
+ ]
123
+ df = pd.DataFrame(df_list, columns=["a", "b", "c", "d"])
124
+ grouped = df.groupby("a", dropna=dropna).sum()
125
+
126
+ expected = pd.DataFrame(outputs, index=pd.Index(idx, dtype="object", name="a"))
127
+
128
+ tm.assert_frame_equal(grouped, expected)
129
+
130
+
131
+ @pytest.mark.parametrize(
132
+ "dropna, idx, expected",
133
+ [
134
+ (True, ["a", "a", "b", np.nan], pd.Series([3, 3], index=["a", "b"])),
135
+ (
136
+ False,
137
+ ["a", "a", "b", np.nan],
138
+ pd.Series([3, 3, 3], index=["a", "b", np.nan]),
139
+ ),
140
+ ],
141
+ )
142
+ def test_groupby_dropna_series_level(dropna, idx, expected):
143
+ ser = pd.Series([1, 2, 3, 3], index=idx)
144
+
145
+ result = ser.groupby(level=0, dropna=dropna).sum()
146
+ tm.assert_series_equal(result, expected)
147
+
148
+
149
+ @pytest.mark.parametrize(
150
+ "dropna, expected",
151
+ [
152
+ (True, pd.Series([210.0, 350.0], index=["a", "b"], name="Max Speed")),
153
+ (
154
+ False,
155
+ pd.Series([210.0, 350.0, 20.0], index=["a", "b", np.nan], name="Max Speed"),
156
+ ),
157
+ ],
158
+ )
159
+ def test_groupby_dropna_series_by(dropna, expected):
160
+ ser = pd.Series(
161
+ [390.0, 350.0, 30.0, 20.0],
162
+ index=["Falcon", "Falcon", "Parrot", "Parrot"],
163
+ name="Max Speed",
164
+ )
165
+
166
+ result = ser.groupby(["a", "b", "a", np.nan], dropna=dropna).mean()
167
+ tm.assert_series_equal(result, expected)
168
+
169
+
170
+ @pytest.mark.parametrize("dropna", (False, True))
171
+ def test_grouper_dropna_propagation(dropna):
172
+ # GH 36604
173
+ df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]})
174
+ gb = df.groupby("A", dropna=dropna)
175
+ assert gb._grouper.dropna == dropna
176
+
177
+
178
+ @pytest.mark.parametrize(
179
+ "index",
180
+ [
181
+ pd.RangeIndex(0, 4),
182
+ list("abcd"),
183
+ pd.MultiIndex.from_product([(1, 2), ("R", "B")], names=["num", "col"]),
184
+ ],
185
+ )
186
+ def test_groupby_dataframe_slice_then_transform(dropna, index):
187
+ # GH35014 & GH35612
188
+ expected_data = {"B": [2, 2, 1, np.nan if dropna else 1]}
189
+
190
+ df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}, index=index)
191
+ gb = df.groupby("A", dropna=dropna)
192
+
193
+ result = gb.transform(len)
194
+ expected = pd.DataFrame(expected_data, index=index)
195
+ tm.assert_frame_equal(result, expected)
196
+
197
+ result = gb[["B"]].transform(len)
198
+ expected = pd.DataFrame(expected_data, index=index)
199
+ tm.assert_frame_equal(result, expected)
200
+
201
+ result = gb["B"].transform(len)
202
+ expected = pd.Series(expected_data["B"], index=index, name="B")
203
+ tm.assert_series_equal(result, expected)
204
+
205
+
206
+ @pytest.mark.parametrize(
207
+ "dropna, tuples, outputs",
208
+ [
209
+ (
210
+ True,
211
+ [["A", "B"], ["B", "A"]],
212
+ {"c": [13.0, 123.23], "d": [12.0, 123.0], "e": [1.0, 1.0]},
213
+ ),
214
+ (
215
+ False,
216
+ [["A", "B"], ["A", np.nan], ["B", "A"]],
217
+ {
218
+ "c": [13.0, 12.3, 123.23],
219
+ "d": [12.0, 233.0, 123.0],
220
+ "e": [1.0, 12.0, 1.0],
221
+ },
222
+ ),
223
+ ],
224
+ )
225
+ def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs):
226
+ # GH 3729
227
+ df_list = [
228
+ ["A", "B", 12, 12, 12],
229
+ ["A", None, 12.3, 233.0, 12],
230
+ ["B", "A", 123.23, 123, 1],
231
+ ["A", "B", 1, 1, 1.0],
232
+ ]
233
+ df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
234
+ agg_dict = {"c": "sum", "d": "max", "e": "min"}
235
+ grouped = df.groupby(["a", "b"], dropna=dropna).agg(agg_dict)
236
+
237
+ mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
238
+
239
+ # Since right now, by default MI will drop NA from levels when we create MI
240
+ # via `from_*`, so we need to add NA for level manually afterwards.
241
+ if not dropna:
242
+ mi = mi.set_levels(["A", "B", np.nan], level="b")
243
+ expected = pd.DataFrame(outputs, index=mi)
244
+
245
+ tm.assert_frame_equal(grouped, expected)
246
+
247
+
248
+ @pytest.mark.arm_slow
249
+ @pytest.mark.parametrize(
250
+ "datetime1, datetime2",
251
+ [
252
+ (pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")),
253
+ (pd.Timedelta("-2 days"), pd.Timedelta("-1 days")),
254
+ (pd.Period("2020-01-01"), pd.Period("2020-02-01")),
255
+ ],
256
+ )
257
+ @pytest.mark.parametrize("dropna, values", [(True, [12, 3]), (False, [12, 3, 6])])
258
+ def test_groupby_dropna_datetime_like_data(
259
+ dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2
260
+ ):
261
+ # 3729
262
+ df = pd.DataFrame(
263
+ {
264
+ "values": [1, 2, 3, 4, 5, 6],
265
+ "dt": [
266
+ datetime1,
267
+ unique_nulls_fixture,
268
+ datetime2,
269
+ unique_nulls_fixture2,
270
+ datetime1,
271
+ datetime1,
272
+ ],
273
+ }
274
+ )
275
+
276
+ if dropna:
277
+ indexes = [datetime1, datetime2]
278
+ else:
279
+ indexes = [datetime1, datetime2, np.nan]
280
+
281
+ grouped = df.groupby("dt", dropna=dropna).agg({"values": "sum"})
282
+ expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt"))
283
+
284
+ tm.assert_frame_equal(grouped, expected)
285
+
286
+
287
+ @pytest.mark.parametrize(
288
+ "dropna, data, selected_data, levels",
289
+ [
290
+ pytest.param(
291
+ False,
292
+ {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]},
293
+ {"values": [0, 1, 0, 0]},
294
+ ["a", "b", np.nan],
295
+ id="dropna_false_has_nan",
296
+ ),
297
+ pytest.param(
298
+ True,
299
+ {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]},
300
+ {"values": [0, 1, 0]},
301
+ None,
302
+ id="dropna_true_has_nan",
303
+ ),
304
+ pytest.param(
305
+ # no nan in "groups"; dropna=True|False should be same.
306
+ False,
307
+ {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]},
308
+ {"values": [0, 1, 0, 0]},
309
+ None,
310
+ id="dropna_false_no_nan",
311
+ ),
312
+ pytest.param(
313
+ # no nan in "groups"; dropna=True|False should be same.
314
+ True,
315
+ {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]},
316
+ {"values": [0, 1, 0, 0]},
317
+ None,
318
+ id="dropna_true_no_nan",
319
+ ),
320
+ ],
321
+ )
322
+ def test_groupby_apply_with_dropna_for_multi_index(dropna, data, selected_data, levels):
323
+ # GH 35889
324
+
325
+ df = pd.DataFrame(data)
326
+ gb = df.groupby("groups", dropna=dropna)
327
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
328
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
329
+ result = gb.apply(lambda grp: pd.DataFrame({"values": range(len(grp))}))
330
+
331
+ mi_tuples = tuple(zip(data["groups"], selected_data["values"]))
332
+ mi = pd.MultiIndex.from_tuples(mi_tuples, names=["groups", None])
333
+ # Since right now, by default MI will drop NA from levels when we create MI
334
+ # via `from_*`, so we need to add NA for level manually afterwards.
335
+ if not dropna and levels:
336
+ mi = mi.set_levels(levels, level="groups")
337
+
338
+ expected = pd.DataFrame(selected_data, index=mi)
339
+ tm.assert_frame_equal(result, expected)
340
+
341
+
342
+ @pytest.mark.parametrize("input_index", [None, ["a"], ["a", "b"]])
343
+ @pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
344
+ @pytest.mark.parametrize("series", [True, False])
345
+ def test_groupby_dropna_with_multiindex_input(input_index, keys, series):
346
+ # GH#46783
347
+ obj = pd.DataFrame(
348
+ {
349
+ "a": [1, np.nan],
350
+ "b": [1, 1],
351
+ "c": [2, 3],
352
+ }
353
+ )
354
+
355
+ expected = obj.set_index(keys)
356
+ if series:
357
+ expected = expected["c"]
358
+ elif input_index == ["a", "b"] and keys == ["a"]:
359
+ # Column b should not be aggregated
360
+ expected = expected[["c"]]
361
+
362
+ if input_index is not None:
363
+ obj = obj.set_index(input_index)
364
+ gb = obj.groupby(keys, dropna=False)
365
+ if series:
366
+ gb = gb["c"]
367
+ result = gb.sum()
368
+
369
+ tm.assert_equal(result, expected)
370
+
371
+
372
+ def test_groupby_nan_included():
373
+ # GH 35646
374
+ data = {"group": ["g1", np.nan, "g1", "g2", np.nan], "B": [0, 1, 2, 3, 4]}
375
+ df = pd.DataFrame(data)
376
+ grouped = df.groupby("group", dropna=False)
377
+ result = grouped.indices
378
+ dtype = np.intp
379
+ expected = {
380
+ "g1": np.array([0, 2], dtype=dtype),
381
+ "g2": np.array([3], dtype=dtype),
382
+ np.nan: np.array([1, 4], dtype=dtype),
383
+ }
384
+ for result_values, expected_values in zip(result.values(), expected.values()):
385
+ tm.assert_numpy_array_equal(result_values, expected_values)
386
+ assert np.isnan(list(result.keys())[2])
387
+ assert list(result.keys())[0:2] == ["g1", "g2"]
388
+
389
+
390
+ def test_groupby_drop_nan_with_multi_index():
391
+ # GH 39895
392
+ df = pd.DataFrame([[np.nan, 0, 1]], columns=["a", "b", "c"])
393
+ df = df.set_index(["a", "b"])
394
+ result = df.groupby(["a", "b"], dropna=False).first()
395
+ expected = df
396
+ tm.assert_frame_equal(result, expected)
397
+
398
+
399
+ # sequence_index enumerates all strings made up of x, y, z of length 4
400
+ @pytest.mark.parametrize("sequence_index", range(3**4))
401
+ @pytest.mark.parametrize(
402
+ "dtype",
403
+ [
404
+ None,
405
+ "UInt8",
406
+ "Int8",
407
+ "UInt16",
408
+ "Int16",
409
+ "UInt32",
410
+ "Int32",
411
+ "UInt64",
412
+ "Int64",
413
+ "Float32",
414
+ "Int64",
415
+ "Float64",
416
+ "category",
417
+ "string",
418
+ pytest.param(
419
+ "string[pyarrow]",
420
+ marks=pytest.mark.skipif(
421
+ pa_version_under10p1, reason="pyarrow is not installed"
422
+ ),
423
+ ),
424
+ "datetime64[ns]",
425
+ "period[d]",
426
+ "Sparse[float]",
427
+ ],
428
+ )
429
+ @pytest.mark.parametrize("test_series", [True, False])
430
+ def test_no_sort_keep_na(sequence_index, dtype, test_series, as_index):
431
+ # GH#46584, GH#48794
432
+
433
+ # Convert sequence_index into a string sequence, e.g. 5 becomes "xxyz"
434
+ # This sequence is used for the grouper.
435
+ sequence = "".join(
436
+ [{0: "x", 1: "y", 2: "z"}[sequence_index // (3**k) % 3] for k in range(4)]
437
+ )
438
+
439
+ # Unique values to use for grouper, depends on dtype
440
+ if dtype in ("string", "string[pyarrow]"):
441
+ uniques = {"x": "x", "y": "y", "z": pd.NA}
442
+ elif dtype in ("datetime64[ns]", "period[d]"):
443
+ uniques = {"x": "2016-01-01", "y": "2017-01-01", "z": pd.NA}
444
+ else:
445
+ uniques = {"x": 1, "y": 2, "z": np.nan}
446
+
447
+ df = pd.DataFrame(
448
+ {
449
+ "key": pd.Series([uniques[label] for label in sequence], dtype=dtype),
450
+ "a": [0, 1, 2, 3],
451
+ }
452
+ )
453
+ gb = df.groupby("key", dropna=False, sort=False, as_index=as_index, observed=False)
454
+ if test_series:
455
+ gb = gb["a"]
456
+ result = gb.sum()
457
+
458
+ # Manually compute the groupby sum, use the labels "x", "y", and "z" to avoid
459
+ # issues with hashing np.nan
460
+ summed = {}
461
+ for idx, label in enumerate(sequence):
462
+ summed[label] = summed.get(label, 0) + idx
463
+ if dtype == "category":
464
+ index = pd.CategoricalIndex(
465
+ [uniques[e] for e in summed],
466
+ df["key"].cat.categories,
467
+ name="key",
468
+ )
469
+ elif isinstance(dtype, str) and dtype.startswith("Sparse"):
470
+ index = pd.Index(
471
+ pd.array([uniques[label] for label in summed], dtype=dtype), name="key"
472
+ )
473
+ else:
474
+ index = pd.Index([uniques[label] for label in summed], dtype=dtype, name="key")
475
+ expected = pd.Series(summed.values(), index=index, name="a", dtype=None)
476
+ if not test_series:
477
+ expected = expected.to_frame()
478
+ if not as_index:
479
+ expected = expected.reset_index()
480
+ if dtype is not None and dtype.startswith("Sparse"):
481
+ expected["key"] = expected["key"].astype(dtype)
482
+
483
+ tm.assert_equal(result, expected)
484
+
485
+
486
+ @pytest.mark.parametrize("test_series", [True, False])
487
+ @pytest.mark.parametrize("dtype", [object, None])
488
+ def test_null_is_null_for_dtype(
489
+ sort, dtype, nulls_fixture, nulls_fixture2, test_series
490
+ ):
491
+ # GH#48506 - groups should always result in using the null for the dtype
492
+ df = pd.DataFrame({"a": [1, 2]})
493
+ groups = pd.Series([nulls_fixture, nulls_fixture2], dtype=dtype)
494
+ obj = df["a"] if test_series else df
495
+ gb = obj.groupby(groups, dropna=False, sort=sort)
496
+ result = gb.sum()
497
+ index = pd.Index([na_value_for_dtype(groups.dtype)])
498
+ expected = pd.DataFrame({"a": [3]}, index=index)
499
+ if test_series:
500
+ tm.assert_series_equal(result, expected["a"])
501
+ else:
502
+ tm.assert_frame_equal(result, expected)
503
+
504
+
505
+ @pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
506
+ def test_categorical_reducers(reduction_func, observed, sort, as_index, index_kind):
507
+ # Ensure there is at least one null value by appending to the end
508
+ values = np.append(np.random.default_rng(2).choice([1, 2, None], size=19), None)
509
+ df = pd.DataFrame(
510
+ {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)}
511
+ )
512
+
513
+ # Strategy: Compare to dropna=True by filling null values with a new code
514
+ df_filled = df.copy()
515
+ df_filled["x"] = pd.Categorical(values, categories=[1, 2, 3, 4]).fillna(4)
516
+
517
+ if index_kind == "range":
518
+ keys = ["x"]
519
+ elif index_kind == "single":
520
+ keys = ["x"]
521
+ df = df.set_index("x")
522
+ df_filled = df_filled.set_index("x")
523
+ else:
524
+ keys = ["x", "x2"]
525
+ df["x2"] = df["x"]
526
+ df = df.set_index(["x", "x2"])
527
+ df_filled["x2"] = df_filled["x"]
528
+ df_filled = df_filled.set_index(["x", "x2"])
529
+ args = get_groupby_method_args(reduction_func, df)
530
+ args_filled = get_groupby_method_args(reduction_func, df_filled)
531
+ if reduction_func == "corrwith" and index_kind == "range":
532
+ # Don't include the grouping columns so we can call reset_index
533
+ args = (args[0].drop(columns=keys),)
534
+ args_filled = (args_filled[0].drop(columns=keys),)
535
+
536
+ gb_keepna = df.groupby(
537
+ keys, dropna=False, observed=observed, sort=sort, as_index=as_index
538
+ )
539
+
540
+ if not observed and reduction_func in ["idxmin", "idxmax"]:
541
+ with pytest.raises(
542
+ ValueError, match="empty group due to unobserved categories"
543
+ ):
544
+ getattr(gb_keepna, reduction_func)(*args)
545
+ return
546
+
547
+ gb_filled = df_filled.groupby(keys, observed=observed, sort=sort, as_index=True)
548
+ expected = getattr(gb_filled, reduction_func)(*args_filled).reset_index()
549
+ expected["x"] = expected["x"].cat.remove_categories([4])
550
+ if index_kind == "multi":
551
+ expected["x2"] = expected["x2"].cat.remove_categories([4])
552
+ if as_index:
553
+ if index_kind == "multi":
554
+ expected = expected.set_index(["x", "x2"])
555
+ else:
556
+ expected = expected.set_index("x")
557
+ elif index_kind != "range" and reduction_func != "size":
558
+ # size, unlike other methods, has the desired behavior in GH#49519
559
+ expected = expected.drop(columns="x")
560
+ if index_kind == "multi":
561
+ expected = expected.drop(columns="x2")
562
+ if reduction_func in ("idxmax", "idxmin") and index_kind != "range":
563
+ # expected was computed with a RangeIndex; need to translate to index values
564
+ values = expected["y"].values.tolist()
565
+ if index_kind == "single":
566
+ values = [np.nan if e == 4 else e for e in values]
567
+ expected["y"] = pd.Categorical(values, categories=[1, 2, 3])
568
+ else:
569
+ values = [(np.nan, np.nan) if e == (4, 4) else e for e in values]
570
+ expected["y"] = values
571
+ if reduction_func == "size":
572
+ # size, unlike other methods, has the desired behavior in GH#49519
573
+ expected = expected.rename(columns={0: "size"})
574
+ if as_index:
575
+ expected = expected["size"].rename(None)
576
+
577
+ if as_index or index_kind == "range" or reduction_func == "size":
578
+ warn = None
579
+ else:
580
+ warn = FutureWarning
581
+ msg = "A grouping .* was excluded from the result"
582
+ with tm.assert_produces_warning(warn, match=msg):
583
+ result = getattr(gb_keepna, reduction_func)(*args)
584
+
585
+ # size will return a Series, others are DataFrame
586
+ tm.assert_equal(result, expected)
587
+
588
+
589
+ def test_categorical_transformers(
590
+ request, transformation_func, observed, sort, as_index
591
+ ):
592
+ # GH#36327
593
+ if transformation_func == "fillna":
594
+ msg = "GH#49651 fillna may incorrectly reorders results when dropna=False"
595
+ request.applymarker(pytest.mark.xfail(reason=msg, strict=False))
596
+
597
+ values = np.append(np.random.default_rng(2).choice([1, 2, None], size=19), None)
598
+ df = pd.DataFrame(
599
+ {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)}
600
+ )
601
+ args = get_groupby_method_args(transformation_func, df)
602
+
603
+ # Compute result for null group
604
+ null_group_values = df[df["x"].isnull()]["y"]
605
+ if transformation_func == "cumcount":
606
+ null_group_data = list(range(len(null_group_values)))
607
+ elif transformation_func == "ngroup":
608
+ if sort:
609
+ if observed:
610
+ na_group = df["x"].nunique(dropna=False) - 1
611
+ else:
612
+ # TODO: Should this be 3?
613
+ na_group = df["x"].nunique(dropna=False) - 1
614
+ else:
615
+ na_group = df.iloc[: null_group_values.index[0]]["x"].nunique()
616
+ null_group_data = len(null_group_values) * [na_group]
617
+ else:
618
+ null_group_data = getattr(null_group_values, transformation_func)(*args)
619
+ null_group_result = pd.DataFrame({"y": null_group_data})
620
+
621
+ gb_keepna = df.groupby(
622
+ "x", dropna=False, observed=observed, sort=sort, as_index=as_index
623
+ )
624
+ gb_dropna = df.groupby("x", dropna=True, observed=observed, sort=sort)
625
+
626
+ msg = "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated"
627
+ if transformation_func == "pct_change":
628
+ with tm.assert_produces_warning(FutureWarning, match=msg):
629
+ result = getattr(gb_keepna, "pct_change")(*args)
630
+ else:
631
+ result = getattr(gb_keepna, transformation_func)(*args)
632
+ expected = getattr(gb_dropna, transformation_func)(*args)
633
+
634
+ for iloc, value in zip(
635
+ df[df["x"].isnull()].index.tolist(), null_group_result.values.ravel()
636
+ ):
637
+ if expected.ndim == 1:
638
+ expected.iloc[iloc] = value
639
+ else:
640
+ expected.iloc[iloc, 0] = value
641
+ if transformation_func == "ngroup":
642
+ expected[df["x"].notnull() & expected.ge(na_group)] += 1
643
+ if transformation_func not in ("rank", "diff", "pct_change", "shift"):
644
+ expected = expected.astype("int64")
645
+
646
+ tm.assert_equal(result, expected)
647
+
648
+
649
+ @pytest.mark.parametrize("method", ["head", "tail"])
650
+ def test_categorical_head_tail(method, observed, sort, as_index):
651
+ # GH#36327
652
+ values = np.random.default_rng(2).choice([1, 2, None], 30)
653
+ df = pd.DataFrame(
654
+ {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}
655
+ )
656
+ gb = df.groupby("x", dropna=False, observed=observed, sort=sort, as_index=as_index)
657
+ result = getattr(gb, method)()
658
+
659
+ if method == "tail":
660
+ values = values[::-1]
661
+ # Take the top 5 values from each group
662
+ mask = (
663
+ ((values == 1) & ((values == 1).cumsum() <= 5))
664
+ | ((values == 2) & ((values == 2).cumsum() <= 5))
665
+ # flake8 doesn't like the vectorized check for None, thinks we should use `is`
666
+ | ((values == None) & ((values == None).cumsum() <= 5)) # noqa: E711
667
+ )
668
+ if method == "tail":
669
+ mask = mask[::-1]
670
+ expected = df[mask]
671
+
672
+ tm.assert_frame_equal(result, expected)
673
+
674
+
675
+ def test_categorical_agg():
676
+ # GH#36327
677
+ values = np.random.default_rng(2).choice([1, 2, None], 30)
678
+ df = pd.DataFrame(
679
+ {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}
680
+ )
681
+ gb = df.groupby("x", dropna=False, observed=False)
682
+ result = gb.agg(lambda x: x.sum())
683
+ expected = gb.sum()
684
+ tm.assert_frame_equal(result, expected)
685
+
686
+
687
+ def test_categorical_transform():
688
+ # GH#36327
689
+ values = np.random.default_rng(2).choice([1, 2, None], 30)
690
+ df = pd.DataFrame(
691
+ {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))}
692
+ )
693
+ gb = df.groupby("x", dropna=False, observed=False)
694
+ result = gb.transform(lambda x: x.sum())
695
+ expected = gb.transform("sum")
696
+ tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_groupby_subclass.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas import (
7
+ DataFrame,
8
+ Index,
9
+ Series,
10
+ )
11
+ import pandas._testing as tm
12
+ from pandas.tests.groupby import get_groupby_method_args
13
+
14
+ pytestmark = pytest.mark.filterwarnings(
15
+ "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"
16
+ )
17
+
18
+
19
+ @pytest.mark.parametrize(
20
+ "obj",
21
+ [
22
+ tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
23
+ tm.SubclassedSeries(np.arange(0, 10), name="A"),
24
+ ],
25
+ )
26
+ def test_groupby_preserves_subclass(obj, groupby_func):
27
+ # GH28330 -- preserve subclass through groupby operations
28
+
29
+ if isinstance(obj, Series) and groupby_func in {"corrwith"}:
30
+ pytest.skip(f"Not applicable for Series and {groupby_func}")
31
+
32
+ grouped = obj.groupby(np.arange(0, 10))
33
+
34
+ # Groups should preserve subclass type
35
+ assert isinstance(grouped.get_group(0), type(obj))
36
+
37
+ args = get_groupby_method_args(groupby_func, obj)
38
+
39
+ warn = FutureWarning if groupby_func == "fillna" else None
40
+ msg = f"{type(grouped).__name__}.fillna is deprecated"
41
+ with tm.assert_produces_warning(warn, match=msg, raise_on_extra_warnings=False):
42
+ result1 = getattr(grouped, groupby_func)(*args)
43
+ with tm.assert_produces_warning(warn, match=msg, raise_on_extra_warnings=False):
44
+ result2 = grouped.agg(groupby_func, *args)
45
+
46
+ # Reduction or transformation kernels should preserve type
47
+ slices = {"ngroup", "cumcount", "size"}
48
+ if isinstance(obj, DataFrame) and groupby_func in slices:
49
+ assert isinstance(result1, tm.SubclassedSeries)
50
+ else:
51
+ assert isinstance(result1, type(obj))
52
+
53
+ # Confirm .agg() groupby operations return same results
54
+ if isinstance(result1, DataFrame):
55
+ tm.assert_frame_equal(result1, result2)
56
+ else:
57
+ tm.assert_series_equal(result1, result2)
58
+
59
+
60
+ def test_groupby_preserves_metadata():
61
+ # GH-37343
62
+ custom_df = tm.SubclassedDataFrame({"a": [1, 2, 3], "b": [1, 1, 2], "c": [7, 8, 9]})
63
+ assert "testattr" in custom_df._metadata
64
+ custom_df.testattr = "hello"
65
+ for _, group_df in custom_df.groupby("c"):
66
+ assert group_df.testattr == "hello"
67
+
68
+ # GH-45314
69
+ def func(group):
70
+ assert isinstance(group, tm.SubclassedDataFrame)
71
+ assert hasattr(group, "testattr")
72
+ assert group.testattr == "hello"
73
+ return group.testattr
74
+
75
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
76
+ with tm.assert_produces_warning(
77
+ DeprecationWarning,
78
+ match=msg,
79
+ raise_on_extra_warnings=False,
80
+ check_stacklevel=False,
81
+ ):
82
+ result = custom_df.groupby("c").apply(func)
83
+ expected = tm.SubclassedSeries(["hello"] * 3, index=Index([7, 8, 9], name="c"))
84
+ tm.assert_series_equal(result, expected)
85
+
86
+ result = custom_df.groupby("c").apply(func, include_groups=False)
87
+ tm.assert_series_equal(result, expected)
88
+
89
+ # https://github.com/pandas-dev/pandas/pull/56761
90
+ result = custom_df.groupby("c")[["a", "b"]].apply(func)
91
+ tm.assert_series_equal(result, expected)
92
+
93
+ def func2(group):
94
+ assert isinstance(group, tm.SubclassedSeries)
95
+ assert hasattr(group, "testattr")
96
+ return group.testattr
97
+
98
+ custom_series = tm.SubclassedSeries([1, 2, 3])
99
+ custom_series.testattr = "hello"
100
+ result = custom_series.groupby(custom_df["c"]).apply(func2)
101
+ tm.assert_series_equal(result, expected)
102
+ result = custom_series.groupby(custom_df["c"]).agg(func2)
103
+ tm.assert_series_equal(result, expected)
104
+
105
+
106
+ @pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame])
107
+ def test_groupby_resample_preserves_subclass(obj):
108
+ # GH28330 -- preserve subclass through groupby.resample()
109
+
110
+ df = obj(
111
+ {
112
+ "Buyer": "Carl Carl Carl Carl Joe Carl".split(),
113
+ "Quantity": [18, 3, 5, 1, 9, 3],
114
+ "Date": [
115
+ datetime(2013, 9, 1, 13, 0),
116
+ datetime(2013, 9, 1, 13, 5),
117
+ datetime(2013, 10, 1, 20, 0),
118
+ datetime(2013, 10, 3, 10, 0),
119
+ datetime(2013, 12, 2, 12, 0),
120
+ datetime(2013, 9, 2, 14, 0),
121
+ ],
122
+ }
123
+ )
124
+ df = df.set_index("Date")
125
+
126
+ # Confirm groupby.resample() preserves dataframe type
127
+ msg = "DataFrameGroupBy.resample operated on the grouping columns"
128
+ with tm.assert_produces_warning(
129
+ DeprecationWarning,
130
+ match=msg,
131
+ raise_on_extra_warnings=False,
132
+ check_stacklevel=False,
133
+ ):
134
+ result = df.groupby("Buyer").resample("5D").sum()
135
+ assert isinstance(result, obj)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_grouping.py ADDED
@@ -0,0 +1,1236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ test where we are determining what we are grouping, or getting groups
3
+ """
4
+ from datetime import (
5
+ date,
6
+ timedelta,
7
+ )
8
+
9
+ import numpy as np
10
+ import pytest
11
+
12
+ import pandas as pd
13
+ from pandas import (
14
+ CategoricalIndex,
15
+ DataFrame,
16
+ Grouper,
17
+ Index,
18
+ MultiIndex,
19
+ Series,
20
+ Timestamp,
21
+ date_range,
22
+ period_range,
23
+ )
24
+ import pandas._testing as tm
25
+ from pandas.core.groupby.grouper import Grouping
26
+
27
+ # selection
28
+ # --------------------------------
29
+
30
+
31
+ class TestSelection:
32
+ def test_select_bad_cols(self):
33
+ df = DataFrame([[1, 2]], columns=["A", "B"])
34
+ g = df.groupby("A")
35
+ with pytest.raises(KeyError, match="\"Columns not found: 'C'\""):
36
+ g[["C"]]
37
+
38
+ with pytest.raises(KeyError, match="^[^A]+$"):
39
+ # A should not be referenced as a bad column...
40
+ # will have to rethink regex if you change message!
41
+ g[["A", "C"]]
42
+
43
+ def test_groupby_duplicated_column_errormsg(self):
44
+ # GH7511
45
+ df = DataFrame(
46
+ columns=["A", "B", "A", "C"], data=[range(4), range(2, 6), range(0, 8, 2)]
47
+ )
48
+
49
+ msg = "Grouper for 'A' not 1-dimensional"
50
+ with pytest.raises(ValueError, match=msg):
51
+ df.groupby("A")
52
+ with pytest.raises(ValueError, match=msg):
53
+ df.groupby(["A", "B"])
54
+
55
+ grouped = df.groupby("B")
56
+ c = grouped.count()
57
+ assert c.columns.nlevels == 1
58
+ assert c.columns.size == 3
59
+
60
+ def test_column_select_via_attr(self, df):
61
+ result = df.groupby("A").C.sum()
62
+ expected = df.groupby("A")["C"].sum()
63
+ tm.assert_series_equal(result, expected)
64
+
65
+ df["mean"] = 1.5
66
+ result = df.groupby("A").mean(numeric_only=True)
67
+ expected = df.groupby("A")[["C", "D", "mean"]].agg("mean")
68
+ tm.assert_frame_equal(result, expected)
69
+
70
+ def test_getitem_list_of_columns(self):
71
+ df = DataFrame(
72
+ {
73
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
74
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
75
+ "C": np.random.default_rng(2).standard_normal(8),
76
+ "D": np.random.default_rng(2).standard_normal(8),
77
+ "E": np.random.default_rng(2).standard_normal(8),
78
+ }
79
+ )
80
+
81
+ result = df.groupby("A")[["C", "D"]].mean()
82
+ result2 = df.groupby("A")[df.columns[2:4]].mean()
83
+
84
+ expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
85
+
86
+ tm.assert_frame_equal(result, expected)
87
+ tm.assert_frame_equal(result2, expected)
88
+
89
+ def test_getitem_numeric_column_names(self):
90
+ # GH #13731
91
+ df = DataFrame(
92
+ {
93
+ 0: list("abcd") * 2,
94
+ 2: np.random.default_rng(2).standard_normal(8),
95
+ 4: np.random.default_rng(2).standard_normal(8),
96
+ 6: np.random.default_rng(2).standard_normal(8),
97
+ }
98
+ )
99
+ result = df.groupby(0)[df.columns[1:3]].mean()
100
+ result2 = df.groupby(0)[[2, 4]].mean()
101
+
102
+ expected = df.loc[:, [0, 2, 4]].groupby(0).mean()
103
+
104
+ tm.assert_frame_equal(result, expected)
105
+ tm.assert_frame_equal(result2, expected)
106
+
107
+ # per GH 23566 enforced deprecation raises a ValueError
108
+ with pytest.raises(ValueError, match="Cannot subset columns with a tuple"):
109
+ df.groupby(0)[2, 4].mean()
110
+
111
+ def test_getitem_single_tuple_of_columns_raises(self, df):
112
+ # per GH 23566 enforced deprecation raises a ValueError
113
+ with pytest.raises(ValueError, match="Cannot subset columns with a tuple"):
114
+ df.groupby("A")["C", "D"].mean()
115
+
116
+ def test_getitem_single_column(self):
117
+ df = DataFrame(
118
+ {
119
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
120
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
121
+ "C": np.random.default_rng(2).standard_normal(8),
122
+ "D": np.random.default_rng(2).standard_normal(8),
123
+ "E": np.random.default_rng(2).standard_normal(8),
124
+ }
125
+ )
126
+
127
+ result = df.groupby("A")["C"].mean()
128
+
129
+ as_frame = df.loc[:, ["A", "C"]].groupby("A").mean()
130
+ as_series = as_frame.iloc[:, 0]
131
+ expected = as_series
132
+
133
+ tm.assert_series_equal(result, expected)
134
+
135
+ @pytest.mark.parametrize(
136
+ "func", [lambda x: x.sum(), lambda x: x.agg(lambda y: y.sum())]
137
+ )
138
+ def test_getitem_from_grouper(self, func):
139
+ # GH 50383
140
+ df = DataFrame({"a": [1, 1, 2], "b": 3, "c": 4, "d": 5})
141
+ gb = df.groupby(["a", "b"])[["a", "c"]]
142
+
143
+ idx = MultiIndex.from_tuples([(1, 3), (2, 3)], names=["a", "b"])
144
+ expected = DataFrame({"a": [2, 2], "c": [8, 4]}, index=idx)
145
+ result = func(gb)
146
+
147
+ tm.assert_frame_equal(result, expected)
148
+
149
+ def test_indices_grouped_by_tuple_with_lambda(self):
150
+ # GH 36158
151
+ df = DataFrame(
152
+ {
153
+ "Tuples": (
154
+ (x, y)
155
+ for x in [0, 1]
156
+ for y in np.random.default_rng(2).integers(3, 5, 5)
157
+ )
158
+ }
159
+ )
160
+
161
+ gb = df.groupby("Tuples")
162
+ gb_lambda = df.groupby(lambda x: df.iloc[x, 0])
163
+
164
+ expected = gb.indices
165
+ result = gb_lambda.indices
166
+
167
+ tm.assert_dict_equal(result, expected)
168
+
169
+
170
+ # grouping
171
+ # --------------------------------
172
+
173
+
174
+ class TestGrouping:
175
+ @pytest.mark.parametrize(
176
+ "index",
177
+ [
178
+ Index(list("abcde")),
179
+ Index(np.arange(5)),
180
+ Index(np.arange(5, dtype=float)),
181
+ date_range("2020-01-01", periods=5),
182
+ period_range("2020-01-01", periods=5),
183
+ ],
184
+ )
185
+ def test_grouper_index_types(self, index):
186
+ # related GH5375
187
+ # groupby misbehaving when using a Floatlike index
188
+ df = DataFrame(np.arange(10).reshape(5, 2), columns=list("AB"), index=index)
189
+
190
+ df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
191
+
192
+ df.index = df.index[::-1]
193
+ df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
194
+
195
+ def test_grouper_multilevel_freq(self):
196
+ # GH 7885
197
+ # with level and freq specified in a Grouper
198
+ d0 = date.today() - timedelta(days=14)
199
+ dates = date_range(d0, date.today())
200
+ date_index = MultiIndex.from_product([dates, dates], names=["foo", "bar"])
201
+ df = DataFrame(np.random.default_rng(2).integers(0, 100, 225), index=date_index)
202
+
203
+ # Check string level
204
+ expected = (
205
+ df.reset_index()
206
+ .groupby([Grouper(key="foo", freq="W"), Grouper(key="bar", freq="W")])
207
+ .sum()
208
+ )
209
+ # reset index changes columns dtype to object
210
+ expected.columns = Index([0], dtype="int64")
211
+
212
+ result = df.groupby(
213
+ [Grouper(level="foo", freq="W"), Grouper(level="bar", freq="W")]
214
+ ).sum()
215
+ tm.assert_frame_equal(result, expected)
216
+
217
+ # Check integer level
218
+ result = df.groupby(
219
+ [Grouper(level=0, freq="W"), Grouper(level=1, freq="W")]
220
+ ).sum()
221
+ tm.assert_frame_equal(result, expected)
222
+
223
+ def test_grouper_creation_bug(self):
224
+ # GH 8795
225
+ df = DataFrame({"A": [0, 0, 1, 1, 2, 2], "B": [1, 2, 3, 4, 5, 6]})
226
+ g = df.groupby("A")
227
+ expected = g.sum()
228
+
229
+ g = df.groupby(Grouper(key="A"))
230
+ result = g.sum()
231
+ tm.assert_frame_equal(result, expected)
232
+
233
+ msg = "Grouper axis keyword is deprecated and will be removed"
234
+ with tm.assert_produces_warning(FutureWarning, match=msg):
235
+ gpr = Grouper(key="A", axis=0)
236
+ g = df.groupby(gpr)
237
+ result = g.sum()
238
+ tm.assert_frame_equal(result, expected)
239
+
240
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
241
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
242
+ result = g.apply(lambda x: x.sum())
243
+ expected["A"] = [0, 2, 4]
244
+ expected = expected.loc[:, ["A", "B"]]
245
+ tm.assert_frame_equal(result, expected)
246
+
247
+ def test_grouper_creation_bug2(self):
248
+ # GH14334
249
+ # Grouper(key=...) may be passed in a list
250
+ df = DataFrame(
251
+ {"A": [0, 0, 0, 1, 1, 1], "B": [1, 1, 2, 2, 3, 3], "C": [1, 2, 3, 4, 5, 6]}
252
+ )
253
+ # Group by single column
254
+ expected = df.groupby("A").sum()
255
+ g = df.groupby([Grouper(key="A")])
256
+ result = g.sum()
257
+ tm.assert_frame_equal(result, expected)
258
+
259
+ # Group by two columns
260
+ # using a combination of strings and Grouper objects
261
+ expected = df.groupby(["A", "B"]).sum()
262
+
263
+ # Group with two Grouper objects
264
+ g = df.groupby([Grouper(key="A"), Grouper(key="B")])
265
+ result = g.sum()
266
+ tm.assert_frame_equal(result, expected)
267
+
268
+ # Group with a string and a Grouper object
269
+ g = df.groupby(["A", Grouper(key="B")])
270
+ result = g.sum()
271
+ tm.assert_frame_equal(result, expected)
272
+
273
+ # Group with a Grouper object and a string
274
+ g = df.groupby([Grouper(key="A"), "B"])
275
+ result = g.sum()
276
+ tm.assert_frame_equal(result, expected)
277
+
278
+ def test_grouper_creation_bug3(self, unit):
279
+ # GH8866
280
+ dti = date_range("20130101", periods=2, unit=unit)
281
+ mi = MultiIndex.from_product(
282
+ [list("ab"), range(2), dti],
283
+ names=["one", "two", "three"],
284
+ )
285
+ ser = Series(
286
+ np.arange(8, dtype="int64"),
287
+ index=mi,
288
+ )
289
+ result = ser.groupby(Grouper(level="three", freq="ME")).sum()
290
+ exp_dti = pd.DatetimeIndex(
291
+ [Timestamp("2013-01-31")], freq="ME", name="three"
292
+ ).as_unit(unit)
293
+ expected = Series(
294
+ [28],
295
+ index=exp_dti,
296
+ )
297
+ tm.assert_series_equal(result, expected)
298
+
299
+ # just specifying a level breaks
300
+ result = ser.groupby(Grouper(level="one")).sum()
301
+ expected = ser.groupby(level="one").sum()
302
+ tm.assert_series_equal(result, expected)
303
+
304
+ @pytest.mark.parametrize("func", [False, True])
305
+ def test_grouper_returning_tuples(self, func):
306
+ # GH 22257 , both with dict and with callable
307
+ df = DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]})
308
+ mapping = dict(zip(range(4), [("C", 5), ("D", 6)] * 2))
309
+
310
+ if func:
311
+ gb = df.groupby(by=lambda idx: mapping[idx], sort=False)
312
+ else:
313
+ gb = df.groupby(by=mapping, sort=False)
314
+
315
+ name, expected = next(iter(gb))
316
+ assert name == ("C", 5)
317
+ result = gb.get_group(name)
318
+
319
+ tm.assert_frame_equal(result, expected)
320
+
321
+ def test_grouper_column_and_index(self):
322
+ # GH 14327
323
+
324
+ # Grouping a multi-index frame by a column and an index level should
325
+ # be equivalent to resetting the index and grouping by two columns
326
+ idx = MultiIndex.from_tuples(
327
+ [("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2), ("b", 3)]
328
+ )
329
+ idx.names = ["outer", "inner"]
330
+ df_multi = DataFrame(
331
+ {"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]},
332
+ index=idx,
333
+ )
334
+ result = df_multi.groupby(["B", Grouper(level="inner")]).mean(numeric_only=True)
335
+ expected = (
336
+ df_multi.reset_index().groupby(["B", "inner"]).mean(numeric_only=True)
337
+ )
338
+ tm.assert_frame_equal(result, expected)
339
+
340
+ # Test the reverse grouping order
341
+ result = df_multi.groupby([Grouper(level="inner"), "B"]).mean(numeric_only=True)
342
+ expected = (
343
+ df_multi.reset_index().groupby(["inner", "B"]).mean(numeric_only=True)
344
+ )
345
+ tm.assert_frame_equal(result, expected)
346
+
347
+ # Grouping a single-index frame by a column and the index should
348
+ # be equivalent to resetting the index and grouping by two columns
349
+ df_single = df_multi.reset_index("outer")
350
+ result = df_single.groupby(["B", Grouper(level="inner")]).mean(
351
+ numeric_only=True
352
+ )
353
+ expected = (
354
+ df_single.reset_index().groupby(["B", "inner"]).mean(numeric_only=True)
355
+ )
356
+ tm.assert_frame_equal(result, expected)
357
+
358
+ # Test the reverse grouping order
359
+ result = df_single.groupby([Grouper(level="inner"), "B"]).mean(
360
+ numeric_only=True
361
+ )
362
+ expected = (
363
+ df_single.reset_index().groupby(["inner", "B"]).mean(numeric_only=True)
364
+ )
365
+ tm.assert_frame_equal(result, expected)
366
+
367
+ def test_groupby_levels_and_columns(self):
368
+ # GH9344, GH9049
369
+ idx_names = ["x", "y"]
370
+ idx = MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names)
371
+ df = DataFrame(np.arange(12).reshape(-1, 3), index=idx)
372
+
373
+ by_levels = df.groupby(level=idx_names).mean()
374
+ # reset_index changes columns dtype to object
375
+ by_columns = df.reset_index().groupby(idx_names).mean()
376
+
377
+ # without casting, by_columns.columns is object-dtype
378
+ by_columns.columns = by_columns.columns.astype(np.int64)
379
+ tm.assert_frame_equal(by_levels, by_columns)
380
+
381
+ def test_groupby_categorical_index_and_columns(self, observed):
382
+ # GH18432, adapted for GH25871
383
+ columns = ["A", "B", "A", "B"]
384
+ categories = ["B", "A"]
385
+ data = np.array(
386
+ [[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]], int
387
+ )
388
+ cat_columns = CategoricalIndex(columns, categories=categories, ordered=True)
389
+ df = DataFrame(data=data, columns=cat_columns)
390
+ depr_msg = "DataFrame.groupby with axis=1 is deprecated"
391
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
392
+ result = df.groupby(axis=1, level=0, observed=observed).sum()
393
+ expected_data = np.array([[4, 2], [4, 2], [4, 2], [4, 2], [4, 2]], int)
394
+ expected_columns = CategoricalIndex(
395
+ categories, categories=categories, ordered=True
396
+ )
397
+ expected = DataFrame(data=expected_data, columns=expected_columns)
398
+ tm.assert_frame_equal(result, expected)
399
+
400
+ # test transposed version
401
+ df = DataFrame(data.T, index=cat_columns)
402
+ msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
403
+ with tm.assert_produces_warning(FutureWarning, match=msg):
404
+ result = df.groupby(axis=0, level=0, observed=observed).sum()
405
+ expected = DataFrame(data=expected_data.T, index=expected_columns)
406
+ tm.assert_frame_equal(result, expected)
407
+
408
+ def test_grouper_getting_correct_binner(self):
409
+ # GH 10063
410
+ # using a non-time-based grouper and a time-based grouper
411
+ # and specifying levels
412
+ df = DataFrame(
413
+ {"A": 1},
414
+ index=MultiIndex.from_product(
415
+ [list("ab"), date_range("20130101", periods=80)], names=["one", "two"]
416
+ ),
417
+ )
418
+ result = df.groupby(
419
+ [Grouper(level="one"), Grouper(level="two", freq="ME")]
420
+ ).sum()
421
+ expected = DataFrame(
422
+ {"A": [31, 28, 21, 31, 28, 21]},
423
+ index=MultiIndex.from_product(
424
+ [list("ab"), date_range("20130101", freq="ME", periods=3)],
425
+ names=["one", "two"],
426
+ ),
427
+ )
428
+ tm.assert_frame_equal(result, expected)
429
+
430
+ def test_grouper_iter(self, df):
431
+ gb = df.groupby("A")
432
+ msg = "DataFrameGroupBy.grouper is deprecated"
433
+ with tm.assert_produces_warning(FutureWarning, match=msg):
434
+ grouper = gb.grouper
435
+ result = sorted(grouper)
436
+ expected = ["bar", "foo"]
437
+ assert result == expected
438
+
439
+ def test_empty_groups(self, df):
440
+ # see gh-1048
441
+ with pytest.raises(ValueError, match="No group keys passed!"):
442
+ df.groupby([])
443
+
444
+ def test_groupby_grouper(self, df):
445
+ grouped = df.groupby("A")
446
+ msg = "DataFrameGroupBy.grouper is deprecated"
447
+ with tm.assert_produces_warning(FutureWarning, match=msg):
448
+ grouper = grouped.grouper
449
+ result = df.groupby(grouper).mean(numeric_only=True)
450
+ expected = grouped.mean(numeric_only=True)
451
+ tm.assert_frame_equal(result, expected)
452
+
453
+ def test_groupby_dict_mapping(self):
454
+ # GH #679
455
+ s = Series({"T1": 5})
456
+ result = s.groupby({"T1": "T2"}).agg("sum")
457
+ expected = s.groupby(["T2"]).agg("sum")
458
+ tm.assert_series_equal(result, expected)
459
+
460
+ s = Series([1.0, 2.0, 3.0, 4.0], index=list("abcd"))
461
+ mapping = {"a": 0, "b": 0, "c": 1, "d": 1}
462
+
463
+ result = s.groupby(mapping).mean()
464
+ result2 = s.groupby(mapping).agg("mean")
465
+ exp_key = np.array([0, 0, 1, 1], dtype=np.int64)
466
+ expected = s.groupby(exp_key).mean()
467
+ expected2 = s.groupby(exp_key).mean()
468
+ tm.assert_series_equal(result, expected)
469
+ tm.assert_series_equal(result, result2)
470
+ tm.assert_series_equal(result, expected2)
471
+
472
+ @pytest.mark.parametrize(
473
+ "index",
474
+ [
475
+ [0, 1, 2, 3],
476
+ ["a", "b", "c", "d"],
477
+ [Timestamp(2021, 7, 28 + i) for i in range(4)],
478
+ ],
479
+ )
480
+ def test_groupby_series_named_with_tuple(self, frame_or_series, index):
481
+ # GH 42731
482
+ obj = frame_or_series([1, 2, 3, 4], index=index)
483
+ groups = Series([1, 0, 1, 0], index=index, name=("a", "a"))
484
+ result = obj.groupby(groups).last()
485
+ expected = frame_or_series([4, 3])
486
+ expected.index.name = ("a", "a")
487
+ tm.assert_equal(result, expected)
488
+
489
+ def test_groupby_grouper_f_sanity_checked(self):
490
+ dates = date_range("01-Jan-2013", periods=12, freq="MS")
491
+ ts = Series(np.random.default_rng(2).standard_normal(12), index=dates)
492
+
493
+ # GH51979
494
+ # simple check that the passed function doesn't operates on the whole index
495
+ msg = "'Timestamp' object is not subscriptable"
496
+ with pytest.raises(TypeError, match=msg):
497
+ ts.groupby(lambda key: key[0:6])
498
+
499
+ result = ts.groupby(lambda x: x).sum()
500
+ expected = ts.groupby(ts.index).sum()
501
+ expected.index.freq = None
502
+ tm.assert_series_equal(result, expected)
503
+
504
+ def test_groupby_with_datetime_key(self):
505
+ # GH 51158
506
+ df = DataFrame(
507
+ {
508
+ "id": ["a", "b"] * 3,
509
+ "b": date_range("2000-01-01", "2000-01-03", freq="9h"),
510
+ }
511
+ )
512
+ grouper = Grouper(key="b", freq="D")
513
+ gb = df.groupby([grouper, "id"])
514
+
515
+ # test number of groups
516
+ expected = {
517
+ (Timestamp("2000-01-01"), "a"): [0, 2],
518
+ (Timestamp("2000-01-01"), "b"): [1],
519
+ (Timestamp("2000-01-02"), "a"): [4],
520
+ (Timestamp("2000-01-02"), "b"): [3, 5],
521
+ }
522
+ tm.assert_dict_equal(gb.groups, expected)
523
+
524
+ # test number of group keys
525
+ assert len(gb.groups.keys()) == 4
526
+
527
+ def test_grouping_error_on_multidim_input(self, df):
528
+ msg = "Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional"
529
+ with pytest.raises(ValueError, match=msg):
530
+ Grouping(df.index, df[["A", "A"]])
531
+
532
+ def test_multiindex_passthru(self):
533
+ # GH 7997
534
+ # regression from 0.14.1
535
+ df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
536
+ df.columns = MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)])
537
+
538
+ depr_msg = "DataFrame.groupby with axis=1 is deprecated"
539
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
540
+ gb = df.groupby(axis=1, level=[0, 1])
541
+ result = gb.first()
542
+ tm.assert_frame_equal(result, df)
543
+
544
+ def test_multiindex_negative_level(self, multiindex_dataframe_random_data):
545
+ # GH 13901
546
+ result = multiindex_dataframe_random_data.groupby(level=-1).sum()
547
+ expected = multiindex_dataframe_random_data.groupby(level="second").sum()
548
+ tm.assert_frame_equal(result, expected)
549
+
550
+ result = multiindex_dataframe_random_data.groupby(level=-2).sum()
551
+ expected = multiindex_dataframe_random_data.groupby(level="first").sum()
552
+ tm.assert_frame_equal(result, expected)
553
+
554
+ result = multiindex_dataframe_random_data.groupby(level=[-2, -1]).sum()
555
+ expected = multiindex_dataframe_random_data.sort_index()
556
+ tm.assert_frame_equal(result, expected)
557
+
558
+ result = multiindex_dataframe_random_data.groupby(level=[-1, "first"]).sum()
559
+ expected = multiindex_dataframe_random_data.groupby(
560
+ level=["second", "first"]
561
+ ).sum()
562
+ tm.assert_frame_equal(result, expected)
563
+
564
+ def test_multifunc_select_col_integer_cols(self, df):
565
+ df.columns = np.arange(len(df.columns))
566
+
567
+ # it works!
568
+ msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated"
569
+ with tm.assert_produces_warning(FutureWarning, match=msg):
570
+ df.groupby(1, as_index=False)[2].agg({"Q": np.mean})
571
+
572
+ def test_multiindex_columns_empty_level(self):
573
+ lst = [["count", "values"], ["to filter", ""]]
574
+ midx = MultiIndex.from_tuples(lst)
575
+
576
+ df = DataFrame([[1, "A"]], columns=midx)
577
+
578
+ grouped = df.groupby("to filter").groups
579
+ assert grouped["A"] == [0]
580
+
581
+ grouped = df.groupby([("to filter", "")]).groups
582
+ assert grouped["A"] == [0]
583
+
584
+ df = DataFrame([[1, "A"], [2, "B"]], columns=midx)
585
+
586
+ expected = df.groupby("to filter").groups
587
+ result = df.groupby([("to filter", "")]).groups
588
+ assert result == expected
589
+
590
+ df = DataFrame([[1, "A"], [2, "A"]], columns=midx)
591
+
592
+ expected = df.groupby("to filter").groups
593
+ result = df.groupby([("to filter", "")]).groups
594
+ tm.assert_dict_equal(result, expected)
595
+
596
+ def test_groupby_multiindex_tuple(self):
597
+ # GH 17979
598
+ df = DataFrame(
599
+ [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],
600
+ columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]),
601
+ )
602
+ expected = df.groupby([("b", 1)]).groups
603
+ result = df.groupby(("b", 1)).groups
604
+ tm.assert_dict_equal(expected, result)
605
+
606
+ df2 = DataFrame(
607
+ df.values,
608
+ columns=MultiIndex.from_arrays(
609
+ [["a", "b", "b", "c"], ["d", "d", "e", "e"]]
610
+ ),
611
+ )
612
+ expected = df2.groupby([("b", "d")]).groups
613
+ result = df.groupby(("b", 1)).groups
614
+ tm.assert_dict_equal(expected, result)
615
+
616
+ df3 = DataFrame(df.values, columns=[("a", "d"), ("b", "d"), ("b", "e"), "c"])
617
+ expected = df3.groupby([("b", "d")]).groups
618
+ result = df.groupby(("b", 1)).groups
619
+ tm.assert_dict_equal(expected, result)
620
+
621
+ def test_groupby_multiindex_partial_indexing_equivalence(self):
622
+ # GH 17977
623
+ df = DataFrame(
624
+ [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],
625
+ columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]),
626
+ )
627
+
628
+ expected_mean = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].mean()
629
+ result_mean = df.groupby([("a", 1)])["b"].mean()
630
+ tm.assert_frame_equal(expected_mean, result_mean)
631
+
632
+ expected_sum = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].sum()
633
+ result_sum = df.groupby([("a", 1)])["b"].sum()
634
+ tm.assert_frame_equal(expected_sum, result_sum)
635
+
636
+ expected_count = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].count()
637
+ result_count = df.groupby([("a", 1)])["b"].count()
638
+ tm.assert_frame_equal(expected_count, result_count)
639
+
640
+ expected_min = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].min()
641
+ result_min = df.groupby([("a", 1)])["b"].min()
642
+ tm.assert_frame_equal(expected_min, result_min)
643
+
644
+ expected_max = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].max()
645
+ result_max = df.groupby([("a", 1)])["b"].max()
646
+ tm.assert_frame_equal(expected_max, result_max)
647
+
648
+ expected_groups = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].groups
649
+ result_groups = df.groupby([("a", 1)])["b"].groups
650
+ tm.assert_dict_equal(expected_groups, result_groups)
651
+
652
+ @pytest.mark.parametrize("sort", [True, False])
653
+ def test_groupby_level(self, sort, multiindex_dataframe_random_data, df):
654
+ # GH 17537
655
+ frame = multiindex_dataframe_random_data
656
+ deleveled = frame.reset_index()
657
+
658
+ result0 = frame.groupby(level=0, sort=sort).sum()
659
+ result1 = frame.groupby(level=1, sort=sort).sum()
660
+
661
+ expected0 = frame.groupby(deleveled["first"].values, sort=sort).sum()
662
+ expected1 = frame.groupby(deleveled["second"].values, sort=sort).sum()
663
+
664
+ expected0.index.name = "first"
665
+ expected1.index.name = "second"
666
+
667
+ assert result0.index.name == "first"
668
+ assert result1.index.name == "second"
669
+
670
+ tm.assert_frame_equal(result0, expected0)
671
+ tm.assert_frame_equal(result1, expected1)
672
+ assert result0.index.name == frame.index.names[0]
673
+ assert result1.index.name == frame.index.names[1]
674
+
675
+ # groupby level name
676
+ result0 = frame.groupby(level="first", sort=sort).sum()
677
+ result1 = frame.groupby(level="second", sort=sort).sum()
678
+ tm.assert_frame_equal(result0, expected0)
679
+ tm.assert_frame_equal(result1, expected1)
680
+
681
+ # axis=1
682
+ msg = "DataFrame.groupby with axis=1 is deprecated"
683
+ with tm.assert_produces_warning(FutureWarning, match=msg):
684
+ result0 = frame.T.groupby(level=0, axis=1, sort=sort).sum()
685
+ result1 = frame.T.groupby(level=1, axis=1, sort=sort).sum()
686
+ tm.assert_frame_equal(result0, expected0.T)
687
+ tm.assert_frame_equal(result1, expected1.T)
688
+
689
+ # raise exception for non-MultiIndex
690
+ msg = "level > 0 or level < -1 only valid with MultiIndex"
691
+ with pytest.raises(ValueError, match=msg):
692
+ df.groupby(level=1)
693
+
694
+ def test_groupby_level_index_names(self, axis):
695
+ # GH4014 this used to raise ValueError since 'exp'>1 (in py2)
696
+ df = DataFrame({"exp": ["A"] * 3 + ["B"] * 3, "var1": range(6)}).set_index(
697
+ "exp"
698
+ )
699
+ if axis in (1, "columns"):
700
+ df = df.T
701
+ depr_msg = "DataFrame.groupby with axis=1 is deprecated"
702
+ else:
703
+ depr_msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
704
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
705
+ df.groupby(level="exp", axis=axis)
706
+ msg = f"level name foo is not the name of the {df._get_axis_name(axis)}"
707
+ with pytest.raises(ValueError, match=msg):
708
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
709
+ df.groupby(level="foo", axis=axis)
710
+
711
+ @pytest.mark.parametrize("sort", [True, False])
712
+ def test_groupby_level_with_nas(self, sort):
713
+ # GH 17537
714
+ index = MultiIndex(
715
+ levels=[[1, 0], [0, 1, 2, 3]],
716
+ codes=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
717
+ )
718
+
719
+ # factorizing doesn't confuse things
720
+ s = Series(np.arange(8.0), index=index)
721
+ result = s.groupby(level=0, sort=sort).sum()
722
+ expected = Series([6.0, 22.0], index=[0, 1])
723
+ tm.assert_series_equal(result, expected)
724
+
725
+ index = MultiIndex(
726
+ levels=[[1, 0], [0, 1, 2, 3]],
727
+ codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
728
+ )
729
+
730
+ # factorizing doesn't confuse things
731
+ s = Series(np.arange(8.0), index=index)
732
+ result = s.groupby(level=0, sort=sort).sum()
733
+ expected = Series([6.0, 18.0], index=[0.0, 1.0])
734
+ tm.assert_series_equal(result, expected)
735
+
736
+ def test_groupby_args(self, multiindex_dataframe_random_data):
737
+ # PR8618 and issue 8015
738
+ frame = multiindex_dataframe_random_data
739
+
740
+ msg = "You have to supply one of 'by' and 'level'"
741
+ with pytest.raises(TypeError, match=msg):
742
+ frame.groupby()
743
+
744
+ msg = "You have to supply one of 'by' and 'level'"
745
+ with pytest.raises(TypeError, match=msg):
746
+ frame.groupby(by=None, level=None)
747
+
748
+ @pytest.mark.parametrize(
749
+ "sort,labels",
750
+ [
751
+ [True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],
752
+ [False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]],
753
+ ],
754
+ )
755
+ def test_level_preserve_order(self, sort, labels, multiindex_dataframe_random_data):
756
+ # GH 17537
757
+ grouped = multiindex_dataframe_random_data.groupby(level=0, sort=sort)
758
+ exp_labels = np.array(labels, np.intp)
759
+ tm.assert_almost_equal(grouped._grouper.codes[0], exp_labels)
760
+
761
+ def test_grouping_labels(self, multiindex_dataframe_random_data):
762
+ grouped = multiindex_dataframe_random_data.groupby(
763
+ multiindex_dataframe_random_data.index.get_level_values(0)
764
+ )
765
+ exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp)
766
+ tm.assert_almost_equal(grouped._grouper.codes[0], exp_labels)
767
+
768
+ def test_list_grouper_with_nat(self):
769
+ # GH 14715
770
+ df = DataFrame({"date": date_range("1/1/2011", periods=365, freq="D")})
771
+ df.iloc[-1] = pd.NaT
772
+ grouper = Grouper(key="date", freq="YS")
773
+
774
+ # Grouper in a list grouping
775
+ result = df.groupby([grouper])
776
+ expected = {Timestamp("2011-01-01"): Index(list(range(364)))}
777
+ tm.assert_dict_equal(result.groups, expected)
778
+
779
+ # Test case without a list
780
+ result = df.groupby(grouper)
781
+ expected = {Timestamp("2011-01-01"): 365}
782
+ tm.assert_dict_equal(result.groups, expected)
783
+
784
+ @pytest.mark.parametrize(
785
+ "func,expected",
786
+ [
787
+ (
788
+ "transform",
789
+ Series(name=2, dtype=np.float64),
790
+ ),
791
+ (
792
+ "agg",
793
+ Series(
794
+ name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1)
795
+ ),
796
+ ),
797
+ (
798
+ "apply",
799
+ Series(
800
+ name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1)
801
+ ),
802
+ ),
803
+ ],
804
+ )
805
+ def test_evaluate_with_empty_groups(self, func, expected):
806
+ # 26208
807
+ # test transform'ing empty groups
808
+ # (not testing other agg fns, because they return
809
+ # different index objects.
810
+ df = DataFrame({1: [], 2: []})
811
+ g = df.groupby(1, group_keys=False)
812
+ result = getattr(g[2], func)(lambda x: x)
813
+ tm.assert_series_equal(result, expected)
814
+
815
+ def test_groupby_empty(self):
816
+ # https://github.com/pandas-dev/pandas/issues/27190
817
+ s = Series([], name="name", dtype="float64")
818
+ gr = s.groupby([])
819
+
820
+ result = gr.mean()
821
+ expected = s.set_axis(Index([], dtype=np.intp))
822
+ tm.assert_series_equal(result, expected)
823
+
824
+ # check group properties
825
+ assert len(gr._grouper.groupings) == 1
826
+ tm.assert_numpy_array_equal(
827
+ gr._grouper.group_info[0], np.array([], dtype=np.dtype(np.intp))
828
+ )
829
+
830
+ tm.assert_numpy_array_equal(
831
+ gr._grouper.group_info[1], np.array([], dtype=np.dtype(np.intp))
832
+ )
833
+
834
+ assert gr._grouper.group_info[2] == 0
835
+
836
+ # check name
837
+ gb = s.groupby(s)
838
+ msg = "SeriesGroupBy.grouper is deprecated"
839
+ with tm.assert_produces_warning(FutureWarning, match=msg):
840
+ grouper = gb.grouper
841
+ result = grouper.names
842
+ expected = ["name"]
843
+ assert result == expected
844
+
845
+ def test_groupby_level_index_value_all_na(self):
846
+ # issue 20519
847
+ df = DataFrame(
848
+ [["x", np.nan, 10], [None, np.nan, 20]], columns=["A", "B", "C"]
849
+ ).set_index(["A", "B"])
850
+ result = df.groupby(level=["A", "B"]).sum()
851
+ expected = DataFrame(
852
+ data=[],
853
+ index=MultiIndex(
854
+ levels=[Index(["x"], dtype="object"), Index([], dtype="float64")],
855
+ codes=[[], []],
856
+ names=["A", "B"],
857
+ ),
858
+ columns=["C"],
859
+ dtype="int64",
860
+ )
861
+ tm.assert_frame_equal(result, expected)
862
+
863
+ def test_groupby_multiindex_level_empty(self):
864
+ # https://github.com/pandas-dev/pandas/issues/31670
865
+ df = DataFrame(
866
+ [[123, "a", 1.0], [123, "b", 2.0]], columns=["id", "category", "value"]
867
+ )
868
+ df = df.set_index(["id", "category"])
869
+ empty = df[df.value < 0]
870
+ result = empty.groupby("id").sum()
871
+ expected = DataFrame(
872
+ dtype="float64",
873
+ columns=["value"],
874
+ index=Index([], dtype=np.int64, name="id"),
875
+ )
876
+ tm.assert_frame_equal(result, expected)
877
+
878
+
879
+ # get_group
880
+ # --------------------------------
881
+
882
+
883
+ class TestGetGroup:
884
+ def test_get_group(self):
885
+ # GH 5267
886
+ # be datelike friendly
887
+ df = DataFrame(
888
+ {
889
+ "DATE": pd.to_datetime(
890
+ [
891
+ "10-Oct-2013",
892
+ "10-Oct-2013",
893
+ "10-Oct-2013",
894
+ "11-Oct-2013",
895
+ "11-Oct-2013",
896
+ "11-Oct-2013",
897
+ ]
898
+ ),
899
+ "label": ["foo", "foo", "bar", "foo", "foo", "bar"],
900
+ "VAL": [1, 2, 3, 4, 5, 6],
901
+ }
902
+ )
903
+
904
+ g = df.groupby("DATE")
905
+ key = next(iter(g.groups))
906
+ result1 = g.get_group(key)
907
+ result2 = g.get_group(Timestamp(key).to_pydatetime())
908
+ result3 = g.get_group(str(Timestamp(key)))
909
+ tm.assert_frame_equal(result1, result2)
910
+ tm.assert_frame_equal(result1, result3)
911
+
912
+ g = df.groupby(["DATE", "label"])
913
+
914
+ key = next(iter(g.groups))
915
+ result1 = g.get_group(key)
916
+ result2 = g.get_group((Timestamp(key[0]).to_pydatetime(), key[1]))
917
+ result3 = g.get_group((str(Timestamp(key[0])), key[1]))
918
+ tm.assert_frame_equal(result1, result2)
919
+ tm.assert_frame_equal(result1, result3)
920
+
921
+ # must pass a same-length tuple with multiple keys
922
+ msg = "must supply a tuple to get_group with multiple grouping keys"
923
+ with pytest.raises(ValueError, match=msg):
924
+ g.get_group("foo")
925
+ with pytest.raises(ValueError, match=msg):
926
+ g.get_group("foo")
927
+ msg = "must supply a same-length tuple to get_group with multiple grouping keys"
928
+ with pytest.raises(ValueError, match=msg):
929
+ g.get_group(("foo", "bar", "baz"))
930
+
931
+ def test_get_group_empty_bins(self, observed):
932
+ d = DataFrame([3, 1, 7, 6])
933
+ bins = [0, 5, 10, 15]
934
+ g = d.groupby(pd.cut(d[0], bins), observed=observed)
935
+
936
+ # TODO: should prob allow a str of Interval work as well
937
+ # IOW '(0, 5]'
938
+ result = g.get_group(pd.Interval(0, 5))
939
+ expected = DataFrame([3, 1], index=[0, 1])
940
+ tm.assert_frame_equal(result, expected)
941
+
942
+ msg = r"Interval\(10, 15, closed='right'\)"
943
+ with pytest.raises(KeyError, match=msg):
944
+ g.get_group(pd.Interval(10, 15))
945
+
946
+ def test_get_group_grouped_by_tuple(self):
947
+ # GH 8121
948
+ df = DataFrame([[(1,), (1, 2), (1,), (1, 2)]], index=["ids"]).T
949
+ gr = df.groupby("ids")
950
+ expected = DataFrame({"ids": [(1,), (1,)]}, index=[0, 2])
951
+ result = gr.get_group((1,))
952
+ tm.assert_frame_equal(result, expected)
953
+
954
+ dt = pd.to_datetime(["2010-01-01", "2010-01-02", "2010-01-01", "2010-01-02"])
955
+ df = DataFrame({"ids": [(x,) for x in dt]})
956
+ gr = df.groupby("ids")
957
+ result = gr.get_group(("2010-01-01",))
958
+ expected = DataFrame({"ids": [(dt[0],), (dt[0],)]}, index=[0, 2])
959
+ tm.assert_frame_equal(result, expected)
960
+
961
+ def test_get_group_grouped_by_tuple_with_lambda(self):
962
+ # GH 36158
963
+ df = DataFrame(
964
+ {
965
+ "Tuples": (
966
+ (x, y)
967
+ for x in [0, 1]
968
+ for y in np.random.default_rng(2).integers(3, 5, 5)
969
+ )
970
+ }
971
+ )
972
+
973
+ gb = df.groupby("Tuples")
974
+ gb_lambda = df.groupby(lambda x: df.iloc[x, 0])
975
+
976
+ expected = gb.get_group(next(iter(gb.groups.keys())))
977
+ result = gb_lambda.get_group(next(iter(gb_lambda.groups.keys())))
978
+
979
+ tm.assert_frame_equal(result, expected)
980
+
981
+ def test_groupby_with_empty(self):
982
+ index = pd.DatetimeIndex(())
983
+ data = ()
984
+ series = Series(data, index, dtype=object)
985
+ grouper = Grouper(freq="D")
986
+ grouped = series.groupby(grouper)
987
+ assert next(iter(grouped), None) is None
988
+
989
+ def test_groupby_with_single_column(self):
990
+ df = DataFrame({"a": list("abssbab")})
991
+ tm.assert_frame_equal(df.groupby("a").get_group("a"), df.iloc[[0, 5]])
992
+ # GH 13530
993
+ exp = DataFrame(index=Index(["a", "b", "s"], name="a"), columns=[])
994
+ tm.assert_frame_equal(df.groupby("a").count(), exp)
995
+ tm.assert_frame_equal(df.groupby("a").sum(), exp)
996
+
997
+ exp = df.iloc[[3, 4, 5]]
998
+ tm.assert_frame_equal(df.groupby("a").nth(1), exp)
999
+
1000
+ def test_gb_key_len_equal_axis_len(self):
1001
+ # GH16843
1002
+ # test ensures that index and column keys are recognized correctly
1003
+ # when number of keys equals axis length of groupby
1004
+ df = DataFrame(
1005
+ [["foo", "bar", "B", 1], ["foo", "bar", "B", 2], ["foo", "baz", "C", 3]],
1006
+ columns=["first", "second", "third", "one"],
1007
+ )
1008
+ df = df.set_index(["first", "second"])
1009
+ df = df.groupby(["first", "second", "third"]).size()
1010
+ assert df.loc[("foo", "bar", "B")] == 2
1011
+ assert df.loc[("foo", "baz", "C")] == 1
1012
+
1013
+
1014
+ # groups & iteration
1015
+ # --------------------------------
1016
+
1017
+
1018
+ class TestIteration:
1019
+ def test_groups(self, df):
1020
+ grouped = df.groupby(["A"])
1021
+ groups = grouped.groups
1022
+ assert groups is grouped.groups # caching works
1023
+
1024
+ for k, v in grouped.groups.items():
1025
+ assert (df.loc[v]["A"] == k).all()
1026
+
1027
+ grouped = df.groupby(["A", "B"])
1028
+ groups = grouped.groups
1029
+ assert groups is grouped.groups # caching works
1030
+
1031
+ for k, v in grouped.groups.items():
1032
+ assert (df.loc[v]["A"] == k[0]).all()
1033
+ assert (df.loc[v]["B"] == k[1]).all()
1034
+
1035
+ def test_grouping_is_iterable(self, tsframe):
1036
+ # this code path isn't used anywhere else
1037
+ # not sure it's useful
1038
+ grouped = tsframe.groupby([lambda x: x.weekday(), lambda x: x.year])
1039
+
1040
+ # test it works
1041
+ for g in grouped._grouper.groupings[0]:
1042
+ pass
1043
+
1044
+ def test_multi_iter(self):
1045
+ s = Series(np.arange(6))
1046
+ k1 = np.array(["a", "a", "a", "b", "b", "b"])
1047
+ k2 = np.array(["1", "2", "1", "2", "1", "2"])
1048
+
1049
+ grouped = s.groupby([k1, k2])
1050
+
1051
+ iterated = list(grouped)
1052
+ expected = [
1053
+ ("a", "1", s[[0, 2]]),
1054
+ ("a", "2", s[[1]]),
1055
+ ("b", "1", s[[4]]),
1056
+ ("b", "2", s[[3, 5]]),
1057
+ ]
1058
+ for i, ((one, two), three) in enumerate(iterated):
1059
+ e1, e2, e3 = expected[i]
1060
+ assert e1 == one
1061
+ assert e2 == two
1062
+ tm.assert_series_equal(three, e3)
1063
+
1064
+ def test_multi_iter_frame(self, three_group):
1065
+ k1 = np.array(["b", "b", "b", "a", "a", "a"])
1066
+ k2 = np.array(["1", "2", "1", "2", "1", "2"])
1067
+ df = DataFrame(
1068
+ {
1069
+ "v1": np.random.default_rng(2).standard_normal(6),
1070
+ "v2": np.random.default_rng(2).standard_normal(6),
1071
+ "k1": k1,
1072
+ "k2": k2,
1073
+ },
1074
+ index=["one", "two", "three", "four", "five", "six"],
1075
+ )
1076
+
1077
+ grouped = df.groupby(["k1", "k2"])
1078
+
1079
+ # things get sorted!
1080
+ iterated = list(grouped)
1081
+ idx = df.index
1082
+ expected = [
1083
+ ("a", "1", df.loc[idx[[4]]]),
1084
+ ("a", "2", df.loc[idx[[3, 5]]]),
1085
+ ("b", "1", df.loc[idx[[0, 2]]]),
1086
+ ("b", "2", df.loc[idx[[1]]]),
1087
+ ]
1088
+ for i, ((one, two), three) in enumerate(iterated):
1089
+ e1, e2, e3 = expected[i]
1090
+ assert e1 == one
1091
+ assert e2 == two
1092
+ tm.assert_frame_equal(three, e3)
1093
+
1094
+ # don't iterate through groups with no data
1095
+ df["k1"] = np.array(["b", "b", "b", "a", "a", "a"])
1096
+ df["k2"] = np.array(["1", "1", "1", "2", "2", "2"])
1097
+ grouped = df.groupby(["k1", "k2"])
1098
+ # calling `dict` on a DataFrameGroupBy leads to a TypeError,
1099
+ # we need to use a dictionary comprehension here
1100
+ # pylint: disable-next=unnecessary-comprehension
1101
+ groups = {key: gp for key, gp in grouped} # noqa: C416
1102
+ assert len(groups) == 2
1103
+
1104
+ # axis = 1
1105
+ three_levels = three_group.groupby(["A", "B", "C"]).mean()
1106
+ depr_msg = "DataFrame.groupby with axis=1 is deprecated"
1107
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
1108
+ grouped = three_levels.T.groupby(axis=1, level=(1, 2))
1109
+ for key, group in grouped:
1110
+ pass
1111
+
1112
+ def test_dictify(self, df):
1113
+ dict(iter(df.groupby("A")))
1114
+ dict(iter(df.groupby(["A", "B"])))
1115
+ dict(iter(df["C"].groupby(df["A"])))
1116
+ dict(iter(df["C"].groupby([df["A"], df["B"]])))
1117
+ dict(iter(df.groupby("A")["C"]))
1118
+ dict(iter(df.groupby(["A", "B"])["C"]))
1119
+
1120
+ def test_groupby_with_small_elem(self):
1121
+ # GH 8542
1122
+ # length=2
1123
+ df = DataFrame(
1124
+ {"event": ["start", "start"], "change": [1234, 5678]},
1125
+ index=pd.DatetimeIndex(["2014-09-10", "2013-10-10"]),
1126
+ )
1127
+ grouped = df.groupby([Grouper(freq="ME"), "event"])
1128
+ assert len(grouped.groups) == 2
1129
+ assert grouped.ngroups == 2
1130
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
1131
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
1132
+
1133
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
1134
+ tm.assert_frame_equal(res, df.iloc[[0], :])
1135
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
1136
+ tm.assert_frame_equal(res, df.iloc[[1], :])
1137
+
1138
+ df = DataFrame(
1139
+ {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]},
1140
+ index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-09-15"]),
1141
+ )
1142
+ grouped = df.groupby([Grouper(freq="ME"), "event"])
1143
+ assert len(grouped.groups) == 2
1144
+ assert grouped.ngroups == 2
1145
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
1146
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
1147
+
1148
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
1149
+ tm.assert_frame_equal(res, df.iloc[[0, 2], :])
1150
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
1151
+ tm.assert_frame_equal(res, df.iloc[[1], :])
1152
+
1153
+ # length=3
1154
+ df = DataFrame(
1155
+ {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]},
1156
+ index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-08-05"]),
1157
+ )
1158
+ grouped = df.groupby([Grouper(freq="ME"), "event"])
1159
+ assert len(grouped.groups) == 3
1160
+ assert grouped.ngroups == 3
1161
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
1162
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
1163
+ assert (Timestamp("2014-08-31"), "start") in grouped.groups
1164
+
1165
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
1166
+ tm.assert_frame_equal(res, df.iloc[[0], :])
1167
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
1168
+ tm.assert_frame_equal(res, df.iloc[[1], :])
1169
+ res = grouped.get_group((Timestamp("2014-08-31"), "start"))
1170
+ tm.assert_frame_equal(res, df.iloc[[2], :])
1171
+
1172
+ def test_grouping_string_repr(self):
1173
+ # GH 13394
1174
+ mi = MultiIndex.from_arrays([list("AAB"), list("aba")])
1175
+ df = DataFrame([[1, 2, 3]], columns=mi)
1176
+ gr = df.groupby(df[("A", "a")])
1177
+
1178
+ result = gr._grouper.groupings[0].__repr__()
1179
+ expected = "Grouping(('A', 'a'))"
1180
+ assert result == expected
1181
+
1182
+
1183
+ def test_grouping_by_key_is_in_axis():
1184
+ # GH#50413 - Groupers specified by key are in-axis
1185
+ df = DataFrame({"a": [1, 1, 2], "b": [1, 1, 2], "c": [3, 4, 5]}).set_index("a")
1186
+ gb = df.groupby([Grouper(level="a"), Grouper(key="b")], as_index=False)
1187
+ assert not gb._grouper.groupings[0].in_axis
1188
+ assert gb._grouper.groupings[1].in_axis
1189
+
1190
+ # Currently only in-axis groupings are including in the result when as_index=False;
1191
+ # This is likely to change in the future.
1192
+ msg = "A grouping .* was excluded from the result"
1193
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1194
+ result = gb.sum()
1195
+ expected = DataFrame({"b": [1, 2], "c": [7, 5]})
1196
+ tm.assert_frame_equal(result, expected)
1197
+
1198
+
1199
+ def test_grouper_groups():
1200
+ # GH#51182 check Grouper.groups does not raise AttributeError
1201
+ df = DataFrame({"a": [1, 2, 3], "b": 1})
1202
+ grper = Grouper(key="a")
1203
+ gb = df.groupby(grper)
1204
+
1205
+ msg = "Use GroupBy.groups instead"
1206
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1207
+ res = grper.groups
1208
+ assert res is gb.groups
1209
+
1210
+ msg = "Use GroupBy.grouper instead"
1211
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1212
+ res = grper.grouper
1213
+ assert res is gb._grouper
1214
+
1215
+ msg = "Grouper.obj is deprecated and will be removed"
1216
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1217
+ res = grper.obj
1218
+ assert res is gb.obj
1219
+
1220
+ msg = "Use Resampler.ax instead"
1221
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1222
+ grper.ax
1223
+
1224
+ msg = "Grouper.indexer is deprecated"
1225
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1226
+ grper.indexer
1227
+
1228
+
1229
+ @pytest.mark.parametrize("attr", ["group_index", "result_index", "group_arraylike"])
1230
+ def test_depr_grouping_attrs(attr):
1231
+ # GH#56148
1232
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
1233
+ gb = df.groupby("a")
1234
+ msg = f"{attr} is deprecated"
1235
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1236
+ getattr(gb._grouper.groupings[0], attr)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_index_as_string.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ import pandas._testing as tm
6
+
7
+
8
+ @pytest.fixture(params=[["inner"], ["inner", "outer"]])
9
+ def frame(request):
10
+ levels = request.param
11
+ df = pd.DataFrame(
12
+ {
13
+ "outer": ["a", "a", "a", "b", "b", "b"],
14
+ "inner": [1, 2, 3, 1, 2, 3],
15
+ "A": np.arange(6),
16
+ "B": ["one", "one", "two", "two", "one", "one"],
17
+ }
18
+ )
19
+ if levels:
20
+ df = df.set_index(levels)
21
+
22
+ return df
23
+
24
+
25
+ @pytest.fixture()
26
+ def series():
27
+ df = pd.DataFrame(
28
+ {
29
+ "outer": ["a", "a", "a", "b", "b", "b"],
30
+ "inner": [1, 2, 3, 1, 2, 3],
31
+ "A": np.arange(6),
32
+ "B": ["one", "one", "two", "two", "one", "one"],
33
+ }
34
+ )
35
+ s = df.set_index(["outer", "inner", "B"])["A"]
36
+
37
+ return s
38
+
39
+
40
+ @pytest.mark.parametrize(
41
+ "key_strs,groupers",
42
+ [
43
+ ("inner", pd.Grouper(level="inner")), # Index name
44
+ (["inner"], [pd.Grouper(level="inner")]), # List of index name
45
+ (["B", "inner"], ["B", pd.Grouper(level="inner")]), # Column and index
46
+ (["inner", "B"], [pd.Grouper(level="inner"), "B"]), # Index and column
47
+ ],
48
+ )
49
+ def test_grouper_index_level_as_string(frame, key_strs, groupers):
50
+ if "B" not in key_strs or "outer" in frame.columns:
51
+ result = frame.groupby(key_strs).mean(numeric_only=True)
52
+ expected = frame.groupby(groupers).mean(numeric_only=True)
53
+ else:
54
+ result = frame.groupby(key_strs).mean()
55
+ expected = frame.groupby(groupers).mean()
56
+ tm.assert_frame_equal(result, expected)
57
+
58
+
59
+ @pytest.mark.parametrize(
60
+ "levels",
61
+ [
62
+ "inner",
63
+ "outer",
64
+ "B",
65
+ ["inner"],
66
+ ["outer"],
67
+ ["B"],
68
+ ["inner", "outer"],
69
+ ["outer", "inner"],
70
+ ["inner", "outer", "B"],
71
+ ["B", "outer", "inner"],
72
+ ],
73
+ )
74
+ def test_grouper_index_level_as_string_series(series, levels):
75
+ # Compute expected result
76
+ if isinstance(levels, list):
77
+ groupers = [pd.Grouper(level=lv) for lv in levels]
78
+ else:
79
+ groupers = pd.Grouper(level=levels)
80
+
81
+ expected = series.groupby(groupers).mean()
82
+
83
+ # Compute and check result
84
+ result = series.groupby(levels).mean()
85
+ tm.assert_series_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_indexing.py ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Test GroupBy._positional_selector positional grouped indexing GH#42864
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ import pandas._testing as tm
8
+
9
+
10
+ @pytest.mark.parametrize(
11
+ "arg, expected_rows",
12
+ [
13
+ [0, [0, 1, 4]],
14
+ [2, [5]],
15
+ [5, []],
16
+ [-1, [3, 4, 7]],
17
+ [-2, [1, 6]],
18
+ [-6, []],
19
+ ],
20
+ )
21
+ def test_int(slice_test_df, slice_test_grouped, arg, expected_rows):
22
+ # Test single integer
23
+ result = slice_test_grouped._positional_selector[arg]
24
+ expected = slice_test_df.iloc[expected_rows]
25
+
26
+ tm.assert_frame_equal(result, expected)
27
+
28
+
29
+ def test_slice(slice_test_df, slice_test_grouped):
30
+ # Test single slice
31
+ result = slice_test_grouped._positional_selector[0:3:2]
32
+ expected = slice_test_df.iloc[[0, 1, 4, 5]]
33
+
34
+ tm.assert_frame_equal(result, expected)
35
+
36
+
37
+ @pytest.mark.parametrize(
38
+ "arg, expected_rows",
39
+ [
40
+ [[0, 2], [0, 1, 4, 5]],
41
+ [[0, 2, -1], [0, 1, 3, 4, 5, 7]],
42
+ [range(0, 3, 2), [0, 1, 4, 5]],
43
+ [{0, 2}, [0, 1, 4, 5]],
44
+ ],
45
+ ids=[
46
+ "list",
47
+ "negative",
48
+ "range",
49
+ "set",
50
+ ],
51
+ )
52
+ def test_list(slice_test_df, slice_test_grouped, arg, expected_rows):
53
+ # Test lists of integers and integer valued iterables
54
+ result = slice_test_grouped._positional_selector[arg]
55
+ expected = slice_test_df.iloc[expected_rows]
56
+
57
+ tm.assert_frame_equal(result, expected)
58
+
59
+
60
+ def test_ints(slice_test_df, slice_test_grouped):
61
+ # Test tuple of ints
62
+ result = slice_test_grouped._positional_selector[0, 2, -1]
63
+ expected = slice_test_df.iloc[[0, 1, 3, 4, 5, 7]]
64
+
65
+ tm.assert_frame_equal(result, expected)
66
+
67
+
68
+ def test_slices(slice_test_df, slice_test_grouped):
69
+ # Test tuple of slices
70
+ result = slice_test_grouped._positional_selector[:2, -2:]
71
+ expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]
72
+
73
+ tm.assert_frame_equal(result, expected)
74
+
75
+
76
+ def test_mix(slice_test_df, slice_test_grouped):
77
+ # Test mixed tuple of ints and slices
78
+ result = slice_test_grouped._positional_selector[0, 1, -2:]
79
+ expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]]
80
+
81
+ tm.assert_frame_equal(result, expected)
82
+
83
+
84
+ @pytest.mark.parametrize(
85
+ "arg, expected_rows",
86
+ [
87
+ [0, [0, 1, 4]],
88
+ [[0, 2, -1], [0, 1, 3, 4, 5, 7]],
89
+ [(slice(None, 2), slice(-2, None)), [0, 1, 2, 3, 4, 6, 7]],
90
+ ],
91
+ )
92
+ def test_as_index(slice_test_df, arg, expected_rows):
93
+ # Test the default as_index behaviour
94
+ result = slice_test_df.groupby("Group", sort=False)._positional_selector[arg]
95
+ expected = slice_test_df.iloc[expected_rows]
96
+
97
+ tm.assert_frame_equal(result, expected)
98
+
99
+
100
+ def test_doc_examples():
101
+ # Test the examples in the documentation
102
+ df = pd.DataFrame(
103
+ [["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], columns=["A", "B"]
104
+ )
105
+
106
+ grouped = df.groupby("A", as_index=False)
107
+
108
+ result = grouped._positional_selector[1:2]
109
+ expected = pd.DataFrame([["a", 2], ["b", 5]], columns=["A", "B"], index=[1, 4])
110
+
111
+ tm.assert_frame_equal(result, expected)
112
+
113
+ result = grouped._positional_selector[1, -1]
114
+ expected = pd.DataFrame(
115
+ [["a", 2], ["a", 3], ["b", 5]], columns=["A", "B"], index=[1, 2, 4]
116
+ )
117
+
118
+ tm.assert_frame_equal(result, expected)
119
+
120
+
121
+ @pytest.fixture()
122
+ def multiindex_data():
123
+ rng = np.random.default_rng(2)
124
+ ndates = 100
125
+ nitems = 20
126
+ dates = pd.date_range("20130101", periods=ndates, freq="D")
127
+ items = [f"item {i}" for i in range(nitems)]
128
+
129
+ data = {}
130
+ for date in dates:
131
+ nitems_for_date = nitems - rng.integers(0, 12)
132
+ levels = [
133
+ (item, rng.integers(0, 10000) / 100, rng.integers(0, 10000) / 100)
134
+ for item in items[:nitems_for_date]
135
+ ]
136
+ levels.sort(key=lambda x: x[1])
137
+ data[date] = levels
138
+
139
+ return data
140
+
141
+
142
+ def _make_df_from_data(data):
143
+ rows = {}
144
+ for date in data:
145
+ for level in data[date]:
146
+ rows[(date, level[0])] = {"A": level[1], "B": level[2]}
147
+
148
+ df = pd.DataFrame.from_dict(rows, orient="index")
149
+ df.index.names = ("Date", "Item")
150
+ return df
151
+
152
+
153
+ def test_multiindex(multiindex_data):
154
+ # Test the multiindex mentioned as the use-case in the documentation
155
+ df = _make_df_from_data(multiindex_data)
156
+ result = df.groupby("Date", as_index=False).nth(slice(3, -3))
157
+
158
+ sliced = {date: multiindex_data[date][3:-3] for date in multiindex_data}
159
+ expected = _make_df_from_data(sliced)
160
+
161
+ tm.assert_frame_equal(result, expected)
162
+
163
+
164
+ @pytest.mark.parametrize("arg", [1, 5, 30, 1000, -1, -5, -30, -1000])
165
+ @pytest.mark.parametrize("method", ["head", "tail"])
166
+ @pytest.mark.parametrize("simulated", [True, False])
167
+ def test_against_head_and_tail(arg, method, simulated):
168
+ # Test gives the same results as grouped head and tail
169
+ n_groups = 100
170
+ n_rows_per_group = 30
171
+
172
+ data = {
173
+ "group": [
174
+ f"group {g}" for j in range(n_rows_per_group) for g in range(n_groups)
175
+ ],
176
+ "value": [
177
+ f"group {g} row {j}"
178
+ for j in range(n_rows_per_group)
179
+ for g in range(n_groups)
180
+ ],
181
+ }
182
+ df = pd.DataFrame(data)
183
+ grouped = df.groupby("group", as_index=False)
184
+ size = arg if arg >= 0 else n_rows_per_group + arg
185
+
186
+ if method == "head":
187
+ result = grouped._positional_selector[:arg]
188
+
189
+ if simulated:
190
+ indices = [
191
+ j * n_groups + i
192
+ for j in range(size)
193
+ for i in range(n_groups)
194
+ if j * n_groups + i < n_groups * n_rows_per_group
195
+ ]
196
+ expected = df.iloc[indices]
197
+
198
+ else:
199
+ expected = grouped.head(arg)
200
+
201
+ else:
202
+ result = grouped._positional_selector[-arg:]
203
+
204
+ if simulated:
205
+ indices = [
206
+ (n_rows_per_group + j - size) * n_groups + i
207
+ for j in range(size)
208
+ for i in range(n_groups)
209
+ if (n_rows_per_group + j - size) * n_groups + i >= 0
210
+ ]
211
+ expected = df.iloc[indices]
212
+
213
+ else:
214
+ expected = grouped.tail(arg)
215
+
216
+ tm.assert_frame_equal(result, expected)
217
+
218
+
219
+ @pytest.mark.parametrize("start", [None, 0, 1, 10, -1, -10])
220
+ @pytest.mark.parametrize("stop", [None, 0, 1, 10, -1, -10])
221
+ @pytest.mark.parametrize("step", [None, 1, 5])
222
+ def test_against_df_iloc(start, stop, step):
223
+ # Test that a single group gives the same results as DataFrame.iloc
224
+ n_rows = 30
225
+
226
+ data = {
227
+ "group": ["group 0"] * n_rows,
228
+ "value": list(range(n_rows)),
229
+ }
230
+ df = pd.DataFrame(data)
231
+ grouped = df.groupby("group", as_index=False)
232
+
233
+ result = grouped._positional_selector[start:stop:step]
234
+ expected = df.iloc[start:stop:step]
235
+
236
+ tm.assert_frame_equal(result, expected)
237
+
238
+
239
+ def test_series():
240
+ # Test grouped Series
241
+ ser = pd.Series([1, 2, 3, 4, 5], index=["a", "a", "a", "b", "b"])
242
+ grouped = ser.groupby(level=0)
243
+ result = grouped._positional_selector[1:2]
244
+ expected = pd.Series([2, 5], index=["a", "b"])
245
+
246
+ tm.assert_series_equal(result, expected)
247
+
248
+
249
+ @pytest.mark.parametrize("step", [1, 2, 3, 4, 5])
250
+ def test_step(step):
251
+ # Test slice with various step values
252
+ data = [["x", f"x{i}"] for i in range(5)]
253
+ data += [["y", f"y{i}"] for i in range(4)]
254
+ data += [["z", f"z{i}"] for i in range(3)]
255
+ df = pd.DataFrame(data, columns=["A", "B"])
256
+
257
+ grouped = df.groupby("A", as_index=False)
258
+
259
+ result = grouped._positional_selector[::step]
260
+
261
+ data = [["x", f"x{i}"] for i in range(0, 5, step)]
262
+ data += [["y", f"y{i}"] for i in range(0, 4, step)]
263
+ data += [["z", f"z{i}"] for i in range(0, 3, step)]
264
+
265
+ index = [0 + i for i in range(0, 5, step)]
266
+ index += [5 + i for i in range(0, 4, step)]
267
+ index += [9 + i for i in range(0, 3, step)]
268
+
269
+ expected = pd.DataFrame(data, columns=["A", "B"], index=index)
270
+
271
+ tm.assert_frame_equal(result, expected)
272
+
273
+
274
+ @pytest.fixture()
275
+ def column_group_df():
276
+ return pd.DataFrame(
277
+ [[0, 1, 2, 3, 4, 5, 6], [0, 0, 1, 0, 1, 0, 2]],
278
+ columns=["A", "B", "C", "D", "E", "F", "G"],
279
+ )
280
+
281
+
282
+ def test_column_axis(column_group_df):
283
+ msg = "DataFrame.groupby with axis=1"
284
+ with tm.assert_produces_warning(FutureWarning, match=msg):
285
+ g = column_group_df.groupby(column_group_df.iloc[1], axis=1)
286
+ result = g._positional_selector[1:-1]
287
+ expected = column_group_df.iloc[:, [1, 3]]
288
+
289
+ tm.assert_frame_equal(result, expected)
290
+
291
+
292
+ def test_columns_on_iter():
293
+ # GitHub issue #44821
294
+ df = pd.DataFrame({k: range(10) for k in "ABC"})
295
+
296
+ # Group-by and select columns
297
+ cols = ["A", "B"]
298
+ for _, dg in df.groupby(df.A < 4)[cols]:
299
+ tm.assert_index_equal(dg.columns, pd.Index(cols))
300
+ assert "C" not in dg.columns
301
+
302
+
303
+ @pytest.mark.parametrize("func", [list, pd.Index, pd.Series, np.array])
304
+ def test_groupby_duplicated_columns(func):
305
+ # GH#44924
306
+ df = pd.DataFrame(
307
+ {
308
+ "A": [1, 2],
309
+ "B": [3, 3],
310
+ "C": ["G", "G"],
311
+ }
312
+ )
313
+ result = df.groupby("C")[func(["A", "B", "A"])].mean()
314
+ expected = pd.DataFrame(
315
+ [[1.5, 3.0, 1.5]], columns=["A", "B", "A"], index=pd.Index(["G"], name="C")
316
+ )
317
+ tm.assert_frame_equal(result, expected)
318
+
319
+
320
+ def test_groupby_get_nonexisting_groups():
321
+ # GH#32492
322
+ df = pd.DataFrame(
323
+ data={
324
+ "A": ["a1", "a2", None],
325
+ "B": ["b1", "b2", "b1"],
326
+ "val": [1, 2, 3],
327
+ }
328
+ )
329
+ grps = df.groupby(by=["A", "B"])
330
+
331
+ msg = "('a2', 'b1')"
332
+ with pytest.raises(KeyError, match=msg):
333
+ grps.get_group(("a2", "b1"))
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_libgroupby.py ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs import groupby as libgroupby
5
+ from pandas._libs.groupby import (
6
+ group_cumprod,
7
+ group_cumsum,
8
+ group_mean,
9
+ group_sum,
10
+ group_var,
11
+ )
12
+
13
+ from pandas.core.dtypes.common import ensure_platform_int
14
+
15
+ from pandas import isna
16
+ import pandas._testing as tm
17
+
18
+
19
+ class GroupVarTestMixin:
20
+ def test_group_var_generic_1d(self):
21
+ prng = np.random.default_rng(2)
22
+
23
+ out = (np.nan * np.ones((5, 1))).astype(self.dtype)
24
+ counts = np.zeros(5, dtype="int64")
25
+ values = 10 * prng.random((15, 1)).astype(self.dtype)
26
+ labels = np.tile(np.arange(5), (3,)).astype("intp")
27
+
28
+ expected_out = (
29
+ np.squeeze(values).reshape((5, 3), order="F").std(axis=1, ddof=1) ** 2
30
+ )[:, np.newaxis]
31
+ expected_counts = counts + 3
32
+
33
+ self.algo(out, counts, values, labels)
34
+ assert np.allclose(out, expected_out, self.rtol)
35
+ tm.assert_numpy_array_equal(counts, expected_counts)
36
+
37
+ def test_group_var_generic_1d_flat_labels(self):
38
+ prng = np.random.default_rng(2)
39
+
40
+ out = (np.nan * np.ones((1, 1))).astype(self.dtype)
41
+ counts = np.zeros(1, dtype="int64")
42
+ values = 10 * prng.random((5, 1)).astype(self.dtype)
43
+ labels = np.zeros(5, dtype="intp")
44
+
45
+ expected_out = np.array([[values.std(ddof=1) ** 2]])
46
+ expected_counts = counts + 5
47
+
48
+ self.algo(out, counts, values, labels)
49
+
50
+ assert np.allclose(out, expected_out, self.rtol)
51
+ tm.assert_numpy_array_equal(counts, expected_counts)
52
+
53
+ def test_group_var_generic_2d_all_finite(self):
54
+ prng = np.random.default_rng(2)
55
+
56
+ out = (np.nan * np.ones((5, 2))).astype(self.dtype)
57
+ counts = np.zeros(5, dtype="int64")
58
+ values = 10 * prng.random((10, 2)).astype(self.dtype)
59
+ labels = np.tile(np.arange(5), (2,)).astype("intp")
60
+
61
+ expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
62
+ expected_counts = counts + 2
63
+
64
+ self.algo(out, counts, values, labels)
65
+ assert np.allclose(out, expected_out, self.rtol)
66
+ tm.assert_numpy_array_equal(counts, expected_counts)
67
+
68
+ def test_group_var_generic_2d_some_nan(self):
69
+ prng = np.random.default_rng(2)
70
+
71
+ out = (np.nan * np.ones((5, 2))).astype(self.dtype)
72
+ counts = np.zeros(5, dtype="int64")
73
+ values = 10 * prng.random((10, 2)).astype(self.dtype)
74
+ values[:, 1] = np.nan
75
+ labels = np.tile(np.arange(5), (2,)).astype("intp")
76
+
77
+ expected_out = np.vstack(
78
+ [
79
+ values[:, 0].reshape(5, 2, order="F").std(ddof=1, axis=1) ** 2,
80
+ np.nan * np.ones(5),
81
+ ]
82
+ ).T.astype(self.dtype)
83
+ expected_counts = counts + 2
84
+
85
+ self.algo(out, counts, values, labels)
86
+ tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
87
+ tm.assert_numpy_array_equal(counts, expected_counts)
88
+
89
+ def test_group_var_constant(self):
90
+ # Regression test from GH 10448.
91
+
92
+ out = np.array([[np.nan]], dtype=self.dtype)
93
+ counts = np.array([0], dtype="int64")
94
+ values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
95
+ labels = np.zeros(3, dtype="intp")
96
+
97
+ self.algo(out, counts, values, labels)
98
+
99
+ assert counts[0] == 3
100
+ assert out[0, 0] >= 0
101
+ tm.assert_almost_equal(out[0, 0], 0.0)
102
+
103
+
104
+ class TestGroupVarFloat64(GroupVarTestMixin):
105
+ __test__ = True
106
+
107
+ algo = staticmethod(group_var)
108
+ dtype = np.float64
109
+ rtol = 1e-5
110
+
111
+ def test_group_var_large_inputs(self):
112
+ prng = np.random.default_rng(2)
113
+
114
+ out = np.array([[np.nan]], dtype=self.dtype)
115
+ counts = np.array([0], dtype="int64")
116
+ values = (prng.random(10**6) + 10**12).astype(self.dtype)
117
+ values.shape = (10**6, 1)
118
+ labels = np.zeros(10**6, dtype="intp")
119
+
120
+ self.algo(out, counts, values, labels)
121
+
122
+ assert counts[0] == 10**6
123
+ tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
124
+
125
+
126
+ class TestGroupVarFloat32(GroupVarTestMixin):
127
+ __test__ = True
128
+
129
+ algo = staticmethod(group_var)
130
+ dtype = np.float32
131
+ rtol = 1e-2
132
+
133
+
134
+ @pytest.mark.parametrize("dtype", ["float32", "float64"])
135
+ def test_group_ohlc(dtype):
136
+ obj = np.array(np.random.default_rng(2).standard_normal(20), dtype=dtype)
137
+
138
+ bins = np.array([6, 12, 20])
139
+ out = np.zeros((3, 4), dtype)
140
+ counts = np.zeros(len(out), dtype=np.int64)
141
+ labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
142
+
143
+ func = libgroupby.group_ohlc
144
+ func(out, counts, obj[:, None], labels)
145
+
146
+ def _ohlc(group):
147
+ if isna(group).all():
148
+ return np.repeat(np.nan, 4)
149
+ return [group[0], group.max(), group.min(), group[-1]]
150
+
151
+ expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
152
+
153
+ tm.assert_almost_equal(out, expected)
154
+ tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
155
+
156
+ obj[:6] = np.nan
157
+ func(out, counts, obj[:, None], labels)
158
+ expected[0] = np.nan
159
+ tm.assert_almost_equal(out, expected)
160
+
161
+
162
+ def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
163
+ """
164
+ Check a group transform that executes a cumulative function.
165
+
166
+ Parameters
167
+ ----------
168
+ pd_op : callable
169
+ The pandas cumulative function.
170
+ np_op : callable
171
+ The analogous one in NumPy.
172
+ dtype : type
173
+ The specified dtype of the data.
174
+ """
175
+ is_datetimelike = False
176
+
177
+ data = np.array([[1], [2], [3], [4]], dtype=dtype)
178
+ answer = np.zeros_like(data)
179
+
180
+ labels = np.array([0, 0, 0, 0], dtype=np.intp)
181
+ ngroups = 1
182
+ pd_op(answer, data, labels, ngroups, is_datetimelike)
183
+
184
+ tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
185
+
186
+
187
+ @pytest.mark.parametrize("np_dtype", ["int64", "uint64", "float32", "float64"])
188
+ def test_cython_group_transform_cumsum(np_dtype):
189
+ # see gh-4095
190
+ dtype = np.dtype(np_dtype).type
191
+ pd_op, np_op = group_cumsum, np.cumsum
192
+ _check_cython_group_transform_cumulative(pd_op, np_op, dtype)
193
+
194
+
195
+ def test_cython_group_transform_cumprod():
196
+ # see gh-4095
197
+ dtype = np.float64
198
+ pd_op, np_op = group_cumprod, np.cumprod
199
+ _check_cython_group_transform_cumulative(pd_op, np_op, dtype)
200
+
201
+
202
+ def test_cython_group_transform_algos():
203
+ # see gh-4095
204
+ is_datetimelike = False
205
+
206
+ # with nans
207
+ labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
208
+ ngroups = 1
209
+
210
+ data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
211
+ actual = np.zeros_like(data)
212
+ actual.fill(np.nan)
213
+ group_cumprod(actual, data, labels, ngroups, is_datetimelike)
214
+ expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
215
+ tm.assert_numpy_array_equal(actual[:, 0], expected)
216
+
217
+ actual = np.zeros_like(data)
218
+ actual.fill(np.nan)
219
+ group_cumsum(actual, data, labels, ngroups, is_datetimelike)
220
+ expected = np.array([1, 3, 6, np.nan, 10], dtype="float64")
221
+ tm.assert_numpy_array_equal(actual[:, 0], expected)
222
+
223
+ # timedelta
224
+ is_datetimelike = True
225
+ data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None]
226
+ actual = np.zeros_like(data, dtype="int64")
227
+ group_cumsum(actual, data.view("int64"), labels, ngroups, is_datetimelike)
228
+ expected = np.array(
229
+ [
230
+ np.timedelta64(1, "ns"),
231
+ np.timedelta64(2, "ns"),
232
+ np.timedelta64(3, "ns"),
233
+ np.timedelta64(4, "ns"),
234
+ np.timedelta64(5, "ns"),
235
+ ]
236
+ )
237
+ tm.assert_numpy_array_equal(actual[:, 0].view("m8[ns]"), expected)
238
+
239
+
240
+ def test_cython_group_mean_datetimelike():
241
+ actual = np.zeros(shape=(1, 1), dtype="float64")
242
+ counts = np.array([0], dtype="int64")
243
+ data = (
244
+ np.array(
245
+ [np.timedelta64(2, "ns"), np.timedelta64(4, "ns"), np.timedelta64("NaT")],
246
+ dtype="m8[ns]",
247
+ )[:, None]
248
+ .view("int64")
249
+ .astype("float64")
250
+ )
251
+ labels = np.zeros(len(data), dtype=np.intp)
252
+
253
+ group_mean(actual, counts, data, labels, is_datetimelike=True)
254
+
255
+ tm.assert_numpy_array_equal(actual[:, 0], np.array([3], dtype="float64"))
256
+
257
+
258
+ def test_cython_group_mean_wrong_min_count():
259
+ actual = np.zeros(shape=(1, 1), dtype="float64")
260
+ counts = np.zeros(1, dtype="int64")
261
+ data = np.zeros(1, dtype="float64")[:, None]
262
+ labels = np.zeros(1, dtype=np.intp)
263
+
264
+ with pytest.raises(AssertionError, match="min_count"):
265
+ group_mean(actual, counts, data, labels, is_datetimelike=True, min_count=0)
266
+
267
+
268
+ def test_cython_group_mean_not_datetimelike_but_has_NaT_values():
269
+ actual = np.zeros(shape=(1, 1), dtype="float64")
270
+ counts = np.array([0], dtype="int64")
271
+ data = (
272
+ np.array(
273
+ [np.timedelta64("NaT"), np.timedelta64("NaT")],
274
+ dtype="m8[ns]",
275
+ )[:, None]
276
+ .view("int64")
277
+ .astype("float64")
278
+ )
279
+ labels = np.zeros(len(data), dtype=np.intp)
280
+
281
+ group_mean(actual, counts, data, labels, is_datetimelike=False)
282
+
283
+ tm.assert_numpy_array_equal(
284
+ actual[:, 0], np.array(np.divide(np.add(data[0], data[1]), 2), dtype="float64")
285
+ )
286
+
287
+
288
+ def test_cython_group_mean_Inf_at_begining_and_end():
289
+ # GH 50367
290
+ actual = np.array([[np.nan, np.nan], [np.nan, np.nan]], dtype="float64")
291
+ counts = np.array([0, 0], dtype="int64")
292
+ data = np.array(
293
+ [[np.inf, 1.0], [1.0, 2.0], [2.0, 3.0], [3.0, 4.0], [4.0, 5.0], [5, np.inf]],
294
+ dtype="float64",
295
+ )
296
+ labels = np.array([0, 1, 0, 1, 0, 1], dtype=np.intp)
297
+
298
+ group_mean(actual, counts, data, labels, is_datetimelike=False)
299
+
300
+ expected = np.array([[np.inf, 3], [3, np.inf]], dtype="float64")
301
+
302
+ tm.assert_numpy_array_equal(
303
+ actual,
304
+ expected,
305
+ )
306
+
307
+
308
+ @pytest.mark.parametrize(
309
+ "values, out",
310
+ [
311
+ ([[np.inf], [np.inf], [np.inf]], [[np.inf], [np.inf]]),
312
+ ([[np.inf], [np.inf], [-np.inf]], [[np.inf], [np.nan]]),
313
+ ([[np.inf], [-np.inf], [np.inf]], [[np.inf], [np.nan]]),
314
+ ([[np.inf], [-np.inf], [-np.inf]], [[np.inf], [-np.inf]]),
315
+ ],
316
+ )
317
+ def test_cython_group_sum_Inf_at_begining_and_end(values, out):
318
+ # GH #53606
319
+ actual = np.array([[np.nan], [np.nan]], dtype="float64")
320
+ counts = np.array([0, 0], dtype="int64")
321
+ data = np.array(values, dtype="float64")
322
+ labels = np.array([0, 1, 1], dtype=np.intp)
323
+
324
+ group_sum(actual, counts, data, labels, None, is_datetimelike=False)
325
+
326
+ expected = np.array(out, dtype="float64")
327
+
328
+ tm.assert_numpy_array_equal(
329
+ actual,
330
+ expected,
331
+ )
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_missing.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ DataFrame,
7
+ Index,
8
+ date_range,
9
+ )
10
+ import pandas._testing as tm
11
+
12
+
13
+ @pytest.mark.parametrize("func", ["ffill", "bfill"])
14
+ def test_groupby_column_index_name_lost_fill_funcs(func):
15
+ # GH: 29764 groupby loses index sometimes
16
+ df = DataFrame(
17
+ [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]],
18
+ columns=Index(["type", "a", "b"], name="idx"),
19
+ )
20
+ df_grouped = df.groupby(["type"])[["a", "b"]]
21
+ result = getattr(df_grouped, func)().columns
22
+ expected = Index(["a", "b"], name="idx")
23
+ tm.assert_index_equal(result, expected)
24
+
25
+
26
+ @pytest.mark.parametrize("func", ["ffill", "bfill"])
27
+ def test_groupby_fill_duplicate_column_names(func):
28
+ # GH: 25610 ValueError with duplicate column names
29
+ df1 = DataFrame({"field1": [1, 3, 4], "field2": [1, 3, 4]})
30
+ df2 = DataFrame({"field1": [1, np.nan, 4]})
31
+ df_grouped = pd.concat([df1, df2], axis=1).groupby(by=["field2"])
32
+ expected = DataFrame(
33
+ [[1, 1.0], [3, np.nan], [4, 4.0]], columns=["field1", "field1"]
34
+ )
35
+ result = getattr(df_grouped, func)()
36
+ tm.assert_frame_equal(result, expected)
37
+
38
+
39
+ def test_ffill_missing_arguments():
40
+ # GH 14955
41
+ df = DataFrame({"a": [1, 2], "b": [1, 1]})
42
+ msg = "DataFrameGroupBy.fillna is deprecated"
43
+ with tm.assert_produces_warning(FutureWarning, match=msg):
44
+ with pytest.raises(ValueError, match="Must specify a fill"):
45
+ df.groupby("b").fillna()
46
+
47
+
48
+ @pytest.mark.parametrize(
49
+ "method, expected", [("ffill", [None, "a", "a"]), ("bfill", ["a", "a", None])]
50
+ )
51
+ def test_fillna_with_string_dtype(method, expected):
52
+ # GH 40250
53
+ df = DataFrame({"a": pd.array([None, "a", None], dtype="string"), "b": [0, 0, 0]})
54
+ grp = df.groupby("b")
55
+ msg = "DataFrameGroupBy.fillna is deprecated"
56
+ with tm.assert_produces_warning(FutureWarning, match=msg):
57
+ result = grp.fillna(method=method)
58
+ expected = DataFrame({"a": pd.array(expected, dtype="string")})
59
+ tm.assert_frame_equal(result, expected)
60
+
61
+
62
+ def test_fill_consistency():
63
+ # GH9221
64
+ # pass thru keyword arguments to the generated wrapper
65
+ # are set if the passed kw is None (only)
66
+ df = DataFrame(
67
+ index=pd.MultiIndex.from_product(
68
+ [["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
69
+ ),
70
+ columns=Index(["1", "2"], name="id"),
71
+ )
72
+ df["1"] = [
73
+ np.nan,
74
+ 1,
75
+ np.nan,
76
+ np.nan,
77
+ 11,
78
+ np.nan,
79
+ np.nan,
80
+ 2,
81
+ np.nan,
82
+ np.nan,
83
+ 22,
84
+ np.nan,
85
+ ]
86
+ df["2"] = [
87
+ np.nan,
88
+ 3,
89
+ np.nan,
90
+ np.nan,
91
+ 33,
92
+ np.nan,
93
+ np.nan,
94
+ 4,
95
+ np.nan,
96
+ np.nan,
97
+ 44,
98
+ np.nan,
99
+ ]
100
+
101
+ msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
102
+ with tm.assert_produces_warning(FutureWarning, match=msg):
103
+ expected = df.groupby(level=0, axis=0).fillna(method="ffill")
104
+
105
+ msg = "DataFrame.groupby with axis=1 is deprecated"
106
+ with tm.assert_produces_warning(FutureWarning, match=msg):
107
+ result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
108
+ tm.assert_frame_equal(result, expected)
109
+
110
+
111
+ @pytest.mark.parametrize("method", ["ffill", "bfill"])
112
+ @pytest.mark.parametrize("dropna", [True, False])
113
+ @pytest.mark.parametrize("has_nan_group", [True, False])
114
+ def test_ffill_handles_nan_groups(dropna, method, has_nan_group):
115
+ # GH 34725
116
+
117
+ df_without_nan_rows = DataFrame([(1, 0.1), (2, 0.2)])
118
+
119
+ ridx = [-1, 0, -1, -1, 1, -1]
120
+ df = df_without_nan_rows.reindex(ridx).reset_index(drop=True)
121
+
122
+ group_b = np.nan if has_nan_group else "b"
123
+ df["group_col"] = pd.Series(["a"] * 3 + [group_b] * 3)
124
+
125
+ grouped = df.groupby(by="group_col", dropna=dropna)
126
+ result = getattr(grouped, method)(limit=None)
127
+
128
+ expected_rows = {
129
+ ("ffill", True, True): [-1, 0, 0, -1, -1, -1],
130
+ ("ffill", True, False): [-1, 0, 0, -1, 1, 1],
131
+ ("ffill", False, True): [-1, 0, 0, -1, 1, 1],
132
+ ("ffill", False, False): [-1, 0, 0, -1, 1, 1],
133
+ ("bfill", True, True): [0, 0, -1, -1, -1, -1],
134
+ ("bfill", True, False): [0, 0, -1, 1, 1, -1],
135
+ ("bfill", False, True): [0, 0, -1, 1, 1, -1],
136
+ ("bfill", False, False): [0, 0, -1, 1, 1, -1],
137
+ }
138
+
139
+ ridx = expected_rows.get((method, dropna, has_nan_group))
140
+ expected = df_without_nan_rows.reindex(ridx).reset_index(drop=True)
141
+ # columns are a 'take' on df.columns, which are object dtype
142
+ expected.columns = expected.columns.astype(object)
143
+
144
+ tm.assert_frame_equal(result, expected)
145
+
146
+
147
+ @pytest.mark.parametrize("min_count, value", [(2, np.nan), (-1, 1.0)])
148
+ @pytest.mark.parametrize("func", ["first", "last", "max", "min"])
149
+ def test_min_count(func, min_count, value):
150
+ # GH#37821
151
+ df = DataFrame({"a": [1] * 3, "b": [1, np.nan, np.nan], "c": [np.nan] * 3})
152
+ result = getattr(df.groupby("a"), func)(min_count=min_count)
153
+ expected = DataFrame({"b": [value], "c": [np.nan]}, index=Index([1], name="a"))
154
+ tm.assert_frame_equal(result, expected)
155
+
156
+
157
+ def test_indices_with_missing():
158
+ # GH 9304
159
+ df = DataFrame({"a": [1, 1, np.nan], "b": [2, 3, 4], "c": [5, 6, 7]})
160
+ g = df.groupby(["a", "b"])
161
+ result = g.indices
162
+ expected = {(1.0, 2): np.array([0]), (1.0, 3): np.array([1])}
163
+ assert result == expected
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_numba.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas import (
4
+ DataFrame,
5
+ Series,
6
+ option_context,
7
+ )
8
+ import pandas._testing as tm
9
+
10
+ pytestmark = pytest.mark.single_cpu
11
+
12
+ pytest.importorskip("numba")
13
+
14
+
15
+ @pytest.mark.filterwarnings("ignore")
16
+ # Filter warnings when parallel=True and the function can't be parallelized by Numba
17
+ class TestEngine:
18
+ def test_cython_vs_numba_frame(
19
+ self, sort, nogil, parallel, nopython, numba_supported_reductions
20
+ ):
21
+ func, kwargs = numba_supported_reductions
22
+ df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
23
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
24
+ gb = df.groupby("a", sort=sort)
25
+ result = getattr(gb, func)(
26
+ engine="numba", engine_kwargs=engine_kwargs, **kwargs
27
+ )
28
+ expected = getattr(gb, func)(**kwargs)
29
+ tm.assert_frame_equal(result, expected)
30
+
31
+ def test_cython_vs_numba_getitem(
32
+ self, sort, nogil, parallel, nopython, numba_supported_reductions
33
+ ):
34
+ func, kwargs = numba_supported_reductions
35
+ df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
36
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
37
+ gb = df.groupby("a", sort=sort)["c"]
38
+ result = getattr(gb, func)(
39
+ engine="numba", engine_kwargs=engine_kwargs, **kwargs
40
+ )
41
+ expected = getattr(gb, func)(**kwargs)
42
+ tm.assert_series_equal(result, expected)
43
+
44
+ def test_cython_vs_numba_series(
45
+ self, sort, nogil, parallel, nopython, numba_supported_reductions
46
+ ):
47
+ func, kwargs = numba_supported_reductions
48
+ ser = Series(range(3), index=[1, 2, 1], name="foo")
49
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
50
+ gb = ser.groupby(level=0, sort=sort)
51
+ result = getattr(gb, func)(
52
+ engine="numba", engine_kwargs=engine_kwargs, **kwargs
53
+ )
54
+ expected = getattr(gb, func)(**kwargs)
55
+ tm.assert_series_equal(result, expected)
56
+
57
+ def test_as_index_false_unsupported(self, numba_supported_reductions):
58
+ func, kwargs = numba_supported_reductions
59
+ df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
60
+ gb = df.groupby("a", as_index=False)
61
+ with pytest.raises(NotImplementedError, match="as_index=False"):
62
+ getattr(gb, func)(engine="numba", **kwargs)
63
+
64
+ def test_axis_1_unsupported(self, numba_supported_reductions):
65
+ func, kwargs = numba_supported_reductions
66
+ df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
67
+ gb = df.groupby("a", axis=1)
68
+ with pytest.raises(NotImplementedError, match="axis=1"):
69
+ getattr(gb, func)(engine="numba", **kwargs)
70
+
71
+ def test_no_engine_doesnt_raise(self):
72
+ # GH55520
73
+ df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
74
+ gb = df.groupby("a")
75
+ # Make sure behavior of functions w/out engine argument don't raise
76
+ # when the global use_numba option is set
77
+ with option_context("compute.use_numba", True):
78
+ res = gb.agg({"b": "first"})
79
+ expected = gb.agg({"b": "first"})
80
+ tm.assert_frame_equal(res, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_numeric_only.py ADDED
@@ -0,0 +1,521 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas._libs import lib
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ Index,
12
+ Series,
13
+ Timestamp,
14
+ date_range,
15
+ )
16
+ import pandas._testing as tm
17
+ from pandas.tests.groupby import get_groupby_method_args
18
+
19
+
20
+ class TestNumericOnly:
21
+ # make sure that we are passing thru kwargs to our agg functions
22
+
23
+ @pytest.fixture
24
+ def df(self):
25
+ # GH3668
26
+ # GH5724
27
+ df = DataFrame(
28
+ {
29
+ "group": [1, 1, 2],
30
+ "int": [1, 2, 3],
31
+ "float": [4.0, 5.0, 6.0],
32
+ "string": list("abc"),
33
+ "category_string": Series(list("abc")).astype("category"),
34
+ "category_int": [7, 8, 9],
35
+ "datetime": date_range("20130101", periods=3),
36
+ "datetimetz": date_range("20130101", periods=3, tz="US/Eastern"),
37
+ "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
38
+ },
39
+ columns=[
40
+ "group",
41
+ "int",
42
+ "float",
43
+ "string",
44
+ "category_string",
45
+ "category_int",
46
+ "datetime",
47
+ "datetimetz",
48
+ "timedelta",
49
+ ],
50
+ )
51
+ return df
52
+
53
+ @pytest.mark.parametrize("method", ["mean", "median"])
54
+ def test_averages(self, df, method):
55
+ # mean / median
56
+ expected_columns_numeric = Index(["int", "float", "category_int"])
57
+
58
+ gb = df.groupby("group")
59
+ expected = DataFrame(
60
+ {
61
+ "category_int": [7.5, 9],
62
+ "float": [4.5, 6.0],
63
+ "timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
64
+ "int": [1.5, 3],
65
+ "datetime": [
66
+ Timestamp("2013-01-01 12:00:00"),
67
+ Timestamp("2013-01-03 00:00:00"),
68
+ ],
69
+ "datetimetz": [
70
+ Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
71
+ Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
72
+ ],
73
+ },
74
+ index=Index([1, 2], name="group"),
75
+ columns=[
76
+ "int",
77
+ "float",
78
+ "category_int",
79
+ ],
80
+ )
81
+
82
+ result = getattr(gb, method)(numeric_only=True)
83
+ tm.assert_frame_equal(result.reindex_like(expected), expected)
84
+
85
+ expected_columns = expected.columns
86
+
87
+ self._check(df, method, expected_columns, expected_columns_numeric)
88
+
89
+ @pytest.mark.parametrize("method", ["min", "max"])
90
+ def test_extrema(self, df, method):
91
+ # TODO: min, max *should* handle
92
+ # categorical (ordered) dtype
93
+
94
+ expected_columns = Index(
95
+ [
96
+ "int",
97
+ "float",
98
+ "string",
99
+ "category_int",
100
+ "datetime",
101
+ "datetimetz",
102
+ "timedelta",
103
+ ]
104
+ )
105
+ expected_columns_numeric = expected_columns
106
+
107
+ self._check(df, method, expected_columns, expected_columns_numeric)
108
+
109
+ @pytest.mark.parametrize("method", ["first", "last"])
110
+ def test_first_last(self, df, method):
111
+ expected_columns = Index(
112
+ [
113
+ "int",
114
+ "float",
115
+ "string",
116
+ "category_string",
117
+ "category_int",
118
+ "datetime",
119
+ "datetimetz",
120
+ "timedelta",
121
+ ]
122
+ )
123
+ expected_columns_numeric = expected_columns
124
+
125
+ self._check(df, method, expected_columns, expected_columns_numeric)
126
+
127
+ @pytest.mark.parametrize("method", ["sum", "cumsum"])
128
+ def test_sum_cumsum(self, df, method):
129
+ expected_columns_numeric = Index(["int", "float", "category_int"])
130
+ expected_columns = Index(
131
+ ["int", "float", "string", "category_int", "timedelta"]
132
+ )
133
+ if method == "cumsum":
134
+ # cumsum loses string
135
+ expected_columns = Index(["int", "float", "category_int", "timedelta"])
136
+
137
+ self._check(df, method, expected_columns, expected_columns_numeric)
138
+
139
+ @pytest.mark.parametrize("method", ["prod", "cumprod"])
140
+ def test_prod_cumprod(self, df, method):
141
+ expected_columns = Index(["int", "float", "category_int"])
142
+ expected_columns_numeric = expected_columns
143
+
144
+ self._check(df, method, expected_columns, expected_columns_numeric)
145
+
146
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
147
+ def test_cummin_cummax(self, df, method):
148
+ # like min, max, but don't include strings
149
+ expected_columns = Index(
150
+ ["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
151
+ )
152
+
153
+ # GH#15561: numeric_only=False set by default like min/max
154
+ expected_columns_numeric = expected_columns
155
+
156
+ self._check(df, method, expected_columns, expected_columns_numeric)
157
+
158
+ def _check(self, df, method, expected_columns, expected_columns_numeric):
159
+ gb = df.groupby("group")
160
+
161
+ # object dtypes for transformations are not implemented in Cython and
162
+ # have no Python fallback
163
+ exception = NotImplementedError if method.startswith("cum") else TypeError
164
+
165
+ if method in ("min", "max", "cummin", "cummax", "cumsum", "cumprod"):
166
+ # The methods default to numeric_only=False and raise TypeError
167
+ msg = "|".join(
168
+ [
169
+ "Categorical is not ordered",
170
+ f"Cannot perform {method} with non-ordered Categorical",
171
+ re.escape(f"agg function failed [how->{method},dtype->object]"),
172
+ # cumsum/cummin/cummax/cumprod
173
+ "function is not implemented for this dtype",
174
+ ]
175
+ )
176
+ with pytest.raises(exception, match=msg):
177
+ getattr(gb, method)()
178
+ elif method in ("sum", "mean", "median", "prod"):
179
+ msg = "|".join(
180
+ [
181
+ "category type does not support sum operations",
182
+ re.escape(f"agg function failed [how->{method},dtype->object]"),
183
+ re.escape(f"agg function failed [how->{method},dtype->string]"),
184
+ ]
185
+ )
186
+ with pytest.raises(exception, match=msg):
187
+ getattr(gb, method)()
188
+ else:
189
+ result = getattr(gb, method)()
190
+ tm.assert_index_equal(result.columns, expected_columns_numeric)
191
+
192
+ if method not in ("first", "last"):
193
+ msg = "|".join(
194
+ [
195
+ "Categorical is not ordered",
196
+ "category type does not support",
197
+ "function is not implemented for this dtype",
198
+ f"Cannot perform {method} with non-ordered Categorical",
199
+ re.escape(f"agg function failed [how->{method},dtype->object]"),
200
+ re.escape(f"agg function failed [how->{method},dtype->string]"),
201
+ ]
202
+ )
203
+ with pytest.raises(exception, match=msg):
204
+ getattr(gb, method)(numeric_only=False)
205
+ else:
206
+ result = getattr(gb, method)(numeric_only=False)
207
+ tm.assert_index_equal(result.columns, expected_columns)
208
+
209
+
210
+ @pytest.mark.parametrize("numeric_only", [True, False, None])
211
+ def test_axis1_numeric_only(request, groupby_func, numeric_only, using_infer_string):
212
+ if groupby_func in ("idxmax", "idxmin"):
213
+ pytest.skip("idxmax and idx_min tested in test_idxmin_idxmax_axis1")
214
+ if groupby_func in ("corrwith", "skew"):
215
+ msg = "GH#47723 groupby.corrwith and skew do not correctly implement axis=1"
216
+ request.applymarker(pytest.mark.xfail(reason=msg))
217
+
218
+ df = DataFrame(
219
+ np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "B", "C", "D"]
220
+ )
221
+ df["E"] = "x"
222
+ groups = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]
223
+ gb = df.groupby(groups)
224
+ method = getattr(gb, groupby_func)
225
+ args = get_groupby_method_args(groupby_func, df)
226
+ kwargs = {"axis": 1}
227
+ if numeric_only is not None:
228
+ # when numeric_only is None we don't pass any argument
229
+ kwargs["numeric_only"] = numeric_only
230
+
231
+ # Functions without numeric_only and axis args
232
+ no_args = ("cumprod", "cumsum", "diff", "fillna", "pct_change", "rank", "shift")
233
+ # Functions with axis args
234
+ has_axis = (
235
+ "cumprod",
236
+ "cumsum",
237
+ "diff",
238
+ "pct_change",
239
+ "rank",
240
+ "shift",
241
+ "cummax",
242
+ "cummin",
243
+ "idxmin",
244
+ "idxmax",
245
+ "fillna",
246
+ )
247
+ warn_msg = f"DataFrameGroupBy.{groupby_func} with axis=1 is deprecated"
248
+ if numeric_only is not None and groupby_func in no_args:
249
+ msg = "got an unexpected keyword argument 'numeric_only'"
250
+ if groupby_func in ["cumprod", "cumsum"]:
251
+ with pytest.raises(TypeError, match=msg):
252
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
253
+ method(*args, **kwargs)
254
+ else:
255
+ with pytest.raises(TypeError, match=msg):
256
+ method(*args, **kwargs)
257
+ elif groupby_func not in has_axis:
258
+ msg = "got an unexpected keyword argument 'axis'"
259
+ with pytest.raises(TypeError, match=msg):
260
+ method(*args, **kwargs)
261
+ # fillna and shift are successful even on object dtypes
262
+ elif (numeric_only is None or not numeric_only) and groupby_func not in (
263
+ "fillna",
264
+ "shift",
265
+ ):
266
+ msgs = (
267
+ # cummax, cummin, rank
268
+ "not supported between instances of",
269
+ # cumprod
270
+ "can't multiply sequence by non-int of type 'float'",
271
+ # cumsum, diff, pct_change
272
+ "unsupported operand type",
273
+ "has no kernel",
274
+ )
275
+ if using_infer_string:
276
+ import pyarrow as pa
277
+
278
+ errs = (TypeError, pa.lib.ArrowNotImplementedError)
279
+ else:
280
+ errs = TypeError
281
+ with pytest.raises(errs, match=f"({'|'.join(msgs)})"):
282
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
283
+ method(*args, **kwargs)
284
+ else:
285
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
286
+ result = method(*args, **kwargs)
287
+
288
+ df_expected = df.drop(columns="E").T if numeric_only else df.T
289
+ expected = getattr(df_expected, groupby_func)(*args).T
290
+ if groupby_func == "shift" and not numeric_only:
291
+ # shift with axis=1 leaves the leftmost column as numeric
292
+ # but transposing for expected gives us object dtype
293
+ expected = expected.astype(float)
294
+
295
+ tm.assert_equal(result, expected)
296
+
297
+
298
+ @pytest.mark.parametrize(
299
+ "kernel, has_arg",
300
+ [
301
+ ("all", False),
302
+ ("any", False),
303
+ ("bfill", False),
304
+ ("corr", True),
305
+ ("corrwith", True),
306
+ ("cov", True),
307
+ ("cummax", True),
308
+ ("cummin", True),
309
+ ("cumprod", True),
310
+ ("cumsum", True),
311
+ ("diff", False),
312
+ ("ffill", False),
313
+ ("fillna", False),
314
+ ("first", True),
315
+ ("idxmax", True),
316
+ ("idxmin", True),
317
+ ("last", True),
318
+ ("max", True),
319
+ ("mean", True),
320
+ ("median", True),
321
+ ("min", True),
322
+ ("nth", False),
323
+ ("nunique", False),
324
+ ("pct_change", False),
325
+ ("prod", True),
326
+ ("quantile", True),
327
+ ("sem", True),
328
+ ("skew", True),
329
+ ("std", True),
330
+ ("sum", True),
331
+ ("var", True),
332
+ ],
333
+ )
334
+ @pytest.mark.parametrize("numeric_only", [True, False, lib.no_default])
335
+ @pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
336
+ def test_numeric_only(kernel, has_arg, numeric_only, keys):
337
+ # GH#46072
338
+ # drops_nuisance: Whether the op drops nuisance columns even when numeric_only=False
339
+ # has_arg: Whether the op has a numeric_only arg
340
+ df = DataFrame({"a1": [1, 1], "a2": [2, 2], "a3": [5, 6], "b": 2 * [object]})
341
+
342
+ args = get_groupby_method_args(kernel, df)
343
+ kwargs = {} if numeric_only is lib.no_default else {"numeric_only": numeric_only}
344
+
345
+ gb = df.groupby(keys)
346
+ method = getattr(gb, kernel)
347
+ if has_arg and numeric_only is True:
348
+ # Cases where b does not appear in the result
349
+ result = method(*args, **kwargs)
350
+ assert "b" not in result.columns
351
+ elif (
352
+ # kernels that work on any dtype and have numeric_only arg
353
+ kernel in ("first", "last")
354
+ or (
355
+ # kernels that work on any dtype and don't have numeric_only arg
356
+ kernel in ("any", "all", "bfill", "ffill", "fillna", "nth", "nunique")
357
+ and numeric_only is lib.no_default
358
+ )
359
+ ):
360
+ warn = FutureWarning if kernel == "fillna" else None
361
+ msg = "DataFrameGroupBy.fillna is deprecated"
362
+ with tm.assert_produces_warning(warn, match=msg):
363
+ result = method(*args, **kwargs)
364
+ assert "b" in result.columns
365
+ elif has_arg:
366
+ assert numeric_only is not True
367
+ # kernels that are successful on any dtype were above; this will fail
368
+
369
+ # object dtypes for transformations are not implemented in Cython and
370
+ # have no Python fallback
371
+ exception = NotImplementedError if kernel.startswith("cum") else TypeError
372
+
373
+ msg = "|".join(
374
+ [
375
+ "not allowed for this dtype",
376
+ "cannot be performed against 'object' dtypes",
377
+ # On PY39 message is "a number"; on PY310 and after is "a real number"
378
+ "must be a string or a.* number",
379
+ "unsupported operand type",
380
+ "function is not implemented for this dtype",
381
+ re.escape(f"agg function failed [how->{kernel},dtype->object]"),
382
+ ]
383
+ )
384
+ if kernel == "idxmin":
385
+ msg = "'<' not supported between instances of 'type' and 'type'"
386
+ elif kernel == "idxmax":
387
+ msg = "'>' not supported between instances of 'type' and 'type'"
388
+ with pytest.raises(exception, match=msg):
389
+ method(*args, **kwargs)
390
+ elif not has_arg and numeric_only is not lib.no_default:
391
+ with pytest.raises(
392
+ TypeError, match="got an unexpected keyword argument 'numeric_only'"
393
+ ):
394
+ method(*args, **kwargs)
395
+ else:
396
+ assert kernel in ("diff", "pct_change")
397
+ assert numeric_only is lib.no_default
398
+ # Doesn't have numeric_only argument and fails on nuisance columns
399
+ with pytest.raises(TypeError, match=r"unsupported operand type"):
400
+ method(*args, **kwargs)
401
+
402
+
403
+ @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")
404
+ @pytest.mark.parametrize("dtype", [bool, int, float, object])
405
+ def test_deprecate_numeric_only_series(dtype, groupby_func, request):
406
+ # GH#46560
407
+ grouper = [0, 0, 1]
408
+
409
+ ser = Series([1, 0, 0], dtype=dtype)
410
+ gb = ser.groupby(grouper)
411
+
412
+ if groupby_func == "corrwith":
413
+ # corrwith is not implemented on SeriesGroupBy
414
+ assert not hasattr(gb, groupby_func)
415
+ return
416
+
417
+ method = getattr(gb, groupby_func)
418
+
419
+ expected_ser = Series([1, 0, 0])
420
+ expected_gb = expected_ser.groupby(grouper)
421
+ expected_method = getattr(expected_gb, groupby_func)
422
+
423
+ args = get_groupby_method_args(groupby_func, ser)
424
+
425
+ fails_on_numeric_object = (
426
+ "corr",
427
+ "cov",
428
+ "cummax",
429
+ "cummin",
430
+ "cumprod",
431
+ "cumsum",
432
+ "quantile",
433
+ )
434
+ # ops that give an object result on object input
435
+ obj_result = (
436
+ "first",
437
+ "last",
438
+ "nth",
439
+ "bfill",
440
+ "ffill",
441
+ "shift",
442
+ "sum",
443
+ "diff",
444
+ "pct_change",
445
+ "var",
446
+ "mean",
447
+ "median",
448
+ "min",
449
+ "max",
450
+ "prod",
451
+ "skew",
452
+ )
453
+
454
+ # Test default behavior; kernels that fail may be enabled in the future but kernels
455
+ # that succeed should not be allowed to fail (without deprecation, at least)
456
+ if groupby_func in fails_on_numeric_object and dtype is object:
457
+ if groupby_func == "quantile":
458
+ msg = "cannot be performed against 'object' dtypes"
459
+ else:
460
+ msg = "is not supported for object dtype"
461
+ warn = FutureWarning if groupby_func == "fillna" else None
462
+ warn_msg = "DataFrameGroupBy.fillna is deprecated"
463
+ with tm.assert_produces_warning(warn, match=warn_msg):
464
+ with pytest.raises(TypeError, match=msg):
465
+ method(*args)
466
+ elif dtype is object:
467
+ warn = FutureWarning if groupby_func == "fillna" else None
468
+ warn_msg = "SeriesGroupBy.fillna is deprecated"
469
+ with tm.assert_produces_warning(warn, match=warn_msg):
470
+ result = method(*args)
471
+ with tm.assert_produces_warning(warn, match=warn_msg):
472
+ expected = expected_method(*args)
473
+ if groupby_func in obj_result:
474
+ expected = expected.astype(object)
475
+ tm.assert_series_equal(result, expected)
476
+
477
+ has_numeric_only = (
478
+ "first",
479
+ "last",
480
+ "max",
481
+ "mean",
482
+ "median",
483
+ "min",
484
+ "prod",
485
+ "quantile",
486
+ "sem",
487
+ "skew",
488
+ "std",
489
+ "sum",
490
+ "var",
491
+ "cummax",
492
+ "cummin",
493
+ "cumprod",
494
+ "cumsum",
495
+ )
496
+ if groupby_func not in has_numeric_only:
497
+ msg = "got an unexpected keyword argument 'numeric_only'"
498
+ with pytest.raises(TypeError, match=msg):
499
+ method(*args, numeric_only=True)
500
+ elif dtype is object:
501
+ msg = "|".join(
502
+ [
503
+ "SeriesGroupBy.sem called with numeric_only=True and dtype object",
504
+ "Series.skew does not allow numeric_only=True with non-numeric",
505
+ "cum(sum|prod|min|max) is not supported for object dtype",
506
+ r"Cannot use numeric_only=True with SeriesGroupBy\..* and non-numeric",
507
+ ]
508
+ )
509
+ with pytest.raises(TypeError, match=msg):
510
+ method(*args, numeric_only=True)
511
+ elif dtype == bool and groupby_func == "quantile":
512
+ msg = "Allowing bool dtype in SeriesGroupBy.quantile"
513
+ with tm.assert_produces_warning(FutureWarning, match=msg):
514
+ # GH#51424
515
+ result = method(*args, numeric_only=True)
516
+ expected = method(*args, numeric_only=False)
517
+ tm.assert_series_equal(result, expected)
518
+ else:
519
+ result = method(*args, numeric_only=True)
520
+ expected = method(*args, numeric_only=False)
521
+ tm.assert_series_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_pipe.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ import pandas as pd
4
+ from pandas import (
5
+ DataFrame,
6
+ Index,
7
+ )
8
+ import pandas._testing as tm
9
+
10
+
11
+ def test_pipe():
12
+ # Test the pipe method of DataFrameGroupBy.
13
+ # Issue #17871
14
+
15
+ random_state = np.random.default_rng(2)
16
+
17
+ df = DataFrame(
18
+ {
19
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
20
+ "B": random_state.standard_normal(8),
21
+ "C": random_state.standard_normal(8),
22
+ }
23
+ )
24
+
25
+ def f(dfgb):
26
+ return dfgb.B.max() - dfgb.C.min().min()
27
+
28
+ def square(srs):
29
+ return srs**2
30
+
31
+ # Note that the transformations are
32
+ # GroupBy -> Series
33
+ # Series -> Series
34
+ # This then chains the GroupBy.pipe and the
35
+ # NDFrame.pipe methods
36
+ result = df.groupby("A").pipe(f).pipe(square)
37
+
38
+ index = Index(["bar", "foo"], dtype="object", name="A")
39
+ expected = pd.Series([3.749306591013693, 6.717707873081384], name="B", index=index)
40
+
41
+ tm.assert_series_equal(expected, result)
42
+
43
+
44
+ def test_pipe_args():
45
+ # Test passing args to the pipe method of DataFrameGroupBy.
46
+ # Issue #17871
47
+
48
+ df = DataFrame(
49
+ {
50
+ "group": ["A", "A", "B", "B", "C"],
51
+ "x": [1.0, 2.0, 3.0, 2.0, 5.0],
52
+ "y": [10.0, 100.0, 1000.0, -100.0, -1000.0],
53
+ }
54
+ )
55
+
56
+ def f(dfgb, arg1):
57
+ filtered = dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)
58
+ return filtered.groupby("group")
59
+
60
+ def g(dfgb, arg2):
61
+ return dfgb.sum() / dfgb.sum().sum() + arg2
62
+
63
+ def h(df, arg3):
64
+ return df.x + df.y - arg3
65
+
66
+ result = df.groupby("group").pipe(f, 0).pipe(g, 10).pipe(h, 100)
67
+
68
+ # Assert the results here
69
+ index = Index(["A", "B"], name="group")
70
+ expected = pd.Series([-79.5160891089, -78.4839108911], index=index)
71
+
72
+ tm.assert_series_equal(result, expected)
73
+
74
+ # test SeriesGroupby.pipe
75
+ ser = pd.Series([1, 1, 2, 2, 3, 3])
76
+ result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
77
+
78
+ expected = pd.Series([4, 8, 12], index=Index([1, 2, 3], dtype=np.int64))
79
+
80
+ tm.assert_series_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_raises.py ADDED
@@ -0,0 +1,716 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Only tests that raise an error and have no better location should go here.
2
+ # Tests for specific groupby methods should go in their respective
3
+ # test file.
4
+
5
+ import datetime
6
+ import re
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas import (
12
+ Categorical,
13
+ DataFrame,
14
+ Grouper,
15
+ Series,
16
+ )
17
+ import pandas._testing as tm
18
+ from pandas.tests.groupby import get_groupby_method_args
19
+
20
+
21
+ @pytest.fixture(
22
+ params=[
23
+ "a",
24
+ ["a"],
25
+ ["a", "b"],
26
+ Grouper(key="a"),
27
+ lambda x: x % 2,
28
+ [0, 0, 0, 1, 2, 2, 2, 3, 3],
29
+ np.array([0, 0, 0, 1, 2, 2, 2, 3, 3]),
30
+ dict(zip(range(9), [0, 0, 0, 1, 2, 2, 2, 3, 3])),
31
+ Series([1, 1, 1, 1, 1, 2, 2, 2, 2]),
32
+ [Series([1, 1, 1, 1, 1, 2, 2, 2, 2]), Series([3, 3, 4, 4, 4, 4, 4, 3, 3])],
33
+ ]
34
+ )
35
+ def by(request):
36
+ return request.param
37
+
38
+
39
+ @pytest.fixture(params=[True, False])
40
+ def groupby_series(request):
41
+ return request.param
42
+
43
+
44
+ @pytest.fixture
45
+ def df_with_string_col():
46
+ df = DataFrame(
47
+ {
48
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
49
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
50
+ "c": range(9),
51
+ "d": list("xyzwtyuio"),
52
+ }
53
+ )
54
+ return df
55
+
56
+
57
+ @pytest.fixture
58
+ def df_with_datetime_col():
59
+ df = DataFrame(
60
+ {
61
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
62
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
63
+ "c": range(9),
64
+ "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
65
+ }
66
+ )
67
+ return df
68
+
69
+
70
+ @pytest.fixture
71
+ def df_with_timedelta_col():
72
+ df = DataFrame(
73
+ {
74
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
75
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
76
+ "c": range(9),
77
+ "d": datetime.timedelta(days=1),
78
+ }
79
+ )
80
+ return df
81
+
82
+
83
+ @pytest.fixture
84
+ def df_with_cat_col():
85
+ df = DataFrame(
86
+ {
87
+ "a": [1, 1, 1, 1, 1, 2, 2, 2, 2],
88
+ "b": [3, 3, 4, 4, 4, 4, 4, 3, 3],
89
+ "c": range(9),
90
+ "d": Categorical(
91
+ ["a", "a", "a", "a", "b", "b", "b", "b", "c"],
92
+ categories=["a", "b", "c", "d"],
93
+ ordered=True,
94
+ ),
95
+ }
96
+ )
97
+ return df
98
+
99
+
100
+ def _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg=""):
101
+ warn_klass = None if warn_msg == "" else FutureWarning
102
+ with tm.assert_produces_warning(warn_klass, match=warn_msg):
103
+ if klass is None:
104
+ if how == "method":
105
+ getattr(gb, groupby_func)(*args)
106
+ elif how == "agg":
107
+ gb.agg(groupby_func, *args)
108
+ else:
109
+ gb.transform(groupby_func, *args)
110
+ else:
111
+ with pytest.raises(klass, match=msg):
112
+ if how == "method":
113
+ getattr(gb, groupby_func)(*args)
114
+ elif how == "agg":
115
+ gb.agg(groupby_func, *args)
116
+ else:
117
+ gb.transform(groupby_func, *args)
118
+
119
+
120
+ @pytest.mark.parametrize("how", ["method", "agg", "transform"])
121
+ def test_groupby_raises_string(
122
+ how, by, groupby_series, groupby_func, df_with_string_col
123
+ ):
124
+ df = df_with_string_col
125
+ args = get_groupby_method_args(groupby_func, df)
126
+ gb = df.groupby(by=by)
127
+
128
+ if groupby_series:
129
+ gb = gb["d"]
130
+
131
+ if groupby_func == "corrwith":
132
+ assert not hasattr(gb, "corrwith")
133
+ return
134
+
135
+ klass, msg = {
136
+ "all": (None, ""),
137
+ "any": (None, ""),
138
+ "bfill": (None, ""),
139
+ "corrwith": (TypeError, "Could not convert"),
140
+ "count": (None, ""),
141
+ "cumcount": (None, ""),
142
+ "cummax": (
143
+ (NotImplementedError, TypeError),
144
+ "(function|cummax) is not (implemented|supported) for (this|object) dtype",
145
+ ),
146
+ "cummin": (
147
+ (NotImplementedError, TypeError),
148
+ "(function|cummin) is not (implemented|supported) for (this|object) dtype",
149
+ ),
150
+ "cumprod": (
151
+ (NotImplementedError, TypeError),
152
+ "(function|cumprod) is not (implemented|supported) for (this|object) dtype",
153
+ ),
154
+ "cumsum": (
155
+ (NotImplementedError, TypeError),
156
+ "(function|cumsum) is not (implemented|supported) for (this|object) dtype",
157
+ ),
158
+ "diff": (TypeError, "unsupported operand type"),
159
+ "ffill": (None, ""),
160
+ "fillna": (None, ""),
161
+ "first": (None, ""),
162
+ "idxmax": (None, ""),
163
+ "idxmin": (None, ""),
164
+ "last": (None, ""),
165
+ "max": (None, ""),
166
+ "mean": (
167
+ TypeError,
168
+ re.escape("agg function failed [how->mean,dtype->object]"),
169
+ ),
170
+ "median": (
171
+ TypeError,
172
+ re.escape("agg function failed [how->median,dtype->object]"),
173
+ ),
174
+ "min": (None, ""),
175
+ "ngroup": (None, ""),
176
+ "nunique": (None, ""),
177
+ "pct_change": (TypeError, "unsupported operand type"),
178
+ "prod": (
179
+ TypeError,
180
+ re.escape("agg function failed [how->prod,dtype->object]"),
181
+ ),
182
+ "quantile": (TypeError, "cannot be performed against 'object' dtypes!"),
183
+ "rank": (None, ""),
184
+ "sem": (ValueError, "could not convert string to float"),
185
+ "shift": (None, ""),
186
+ "size": (None, ""),
187
+ "skew": (ValueError, "could not convert string to float"),
188
+ "std": (ValueError, "could not convert string to float"),
189
+ "sum": (None, ""),
190
+ "var": (
191
+ TypeError,
192
+ re.escape("agg function failed [how->var,dtype->"),
193
+ ),
194
+ }[groupby_func]
195
+
196
+ if groupby_func == "fillna":
197
+ kind = "Series" if groupby_series else "DataFrame"
198
+ warn_msg = f"{kind}GroupBy.fillna is deprecated"
199
+ else:
200
+ warn_msg = ""
201
+ _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)
202
+
203
+
204
+ @pytest.mark.parametrize("how", ["agg", "transform"])
205
+ def test_groupby_raises_string_udf(how, by, groupby_series, df_with_string_col):
206
+ df = df_with_string_col
207
+ gb = df.groupby(by=by)
208
+
209
+ if groupby_series:
210
+ gb = gb["d"]
211
+
212
+ def func(x):
213
+ raise TypeError("Test error message")
214
+
215
+ with pytest.raises(TypeError, match="Test error message"):
216
+ getattr(gb, how)(func)
217
+
218
+
219
+ @pytest.mark.parametrize("how", ["agg", "transform"])
220
+ @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
221
+ def test_groupby_raises_string_np(
222
+ how, by, groupby_series, groupby_func_np, df_with_string_col
223
+ ):
224
+ # GH#50749
225
+ df = df_with_string_col
226
+ gb = df.groupby(by=by)
227
+
228
+ if groupby_series:
229
+ gb = gb["d"]
230
+
231
+ klass, msg = {
232
+ np.sum: (None, ""),
233
+ np.mean: (
234
+ TypeError,
235
+ re.escape("agg function failed [how->mean,dtype->object]"),
236
+ ),
237
+ }[groupby_func_np]
238
+
239
+ if groupby_series:
240
+ warn_msg = "using SeriesGroupBy.[sum|mean]"
241
+ else:
242
+ warn_msg = "using DataFrameGroupBy.[sum|mean]"
243
+ _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)
244
+
245
+
246
+ @pytest.mark.parametrize("how", ["method", "agg", "transform"])
247
+ def test_groupby_raises_datetime(
248
+ how, by, groupby_series, groupby_func, df_with_datetime_col
249
+ ):
250
+ df = df_with_datetime_col
251
+ args = get_groupby_method_args(groupby_func, df)
252
+ gb = df.groupby(by=by)
253
+
254
+ if groupby_series:
255
+ gb = gb["d"]
256
+
257
+ if groupby_func == "corrwith":
258
+ assert not hasattr(gb, "corrwith")
259
+ return
260
+
261
+ klass, msg = {
262
+ "all": (None, ""),
263
+ "any": (None, ""),
264
+ "bfill": (None, ""),
265
+ "corrwith": (TypeError, "cannot perform __mul__ with this index type"),
266
+ "count": (None, ""),
267
+ "cumcount": (None, ""),
268
+ "cummax": (None, ""),
269
+ "cummin": (None, ""),
270
+ "cumprod": (TypeError, "datetime64 type does not support cumprod operations"),
271
+ "cumsum": (TypeError, "datetime64 type does not support cumsum operations"),
272
+ "diff": (None, ""),
273
+ "ffill": (None, ""),
274
+ "fillna": (None, ""),
275
+ "first": (None, ""),
276
+ "idxmax": (None, ""),
277
+ "idxmin": (None, ""),
278
+ "last": (None, ""),
279
+ "max": (None, ""),
280
+ "mean": (None, ""),
281
+ "median": (None, ""),
282
+ "min": (None, ""),
283
+ "ngroup": (None, ""),
284
+ "nunique": (None, ""),
285
+ "pct_change": (TypeError, "cannot perform __truediv__ with this index type"),
286
+ "prod": (TypeError, "datetime64 type does not support prod"),
287
+ "quantile": (None, ""),
288
+ "rank": (None, ""),
289
+ "sem": (None, ""),
290
+ "shift": (None, ""),
291
+ "size": (None, ""),
292
+ "skew": (
293
+ TypeError,
294
+ "|".join(
295
+ [
296
+ r"dtype datetime64\[ns\] does not support reduction",
297
+ "datetime64 type does not support skew operations",
298
+ ]
299
+ ),
300
+ ),
301
+ "std": (None, ""),
302
+ "sum": (TypeError, "datetime64 type does not support sum operations"),
303
+ "var": (TypeError, "datetime64 type does not support var operations"),
304
+ }[groupby_func]
305
+
306
+ if groupby_func in ["any", "all"]:
307
+ warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated"
308
+ elif groupby_func == "fillna":
309
+ kind = "Series" if groupby_series else "DataFrame"
310
+ warn_msg = f"{kind}GroupBy.fillna is deprecated"
311
+ else:
312
+ warn_msg = ""
313
+ _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg=warn_msg)
314
+
315
+
316
+ @pytest.mark.parametrize("how", ["agg", "transform"])
317
+ def test_groupby_raises_datetime_udf(how, by, groupby_series, df_with_datetime_col):
318
+ df = df_with_datetime_col
319
+ gb = df.groupby(by=by)
320
+
321
+ if groupby_series:
322
+ gb = gb["d"]
323
+
324
+ def func(x):
325
+ raise TypeError("Test error message")
326
+
327
+ with pytest.raises(TypeError, match="Test error message"):
328
+ getattr(gb, how)(func)
329
+
330
+
331
+ @pytest.mark.parametrize("how", ["agg", "transform"])
332
+ @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
333
+ def test_groupby_raises_datetime_np(
334
+ how, by, groupby_series, groupby_func_np, df_with_datetime_col
335
+ ):
336
+ # GH#50749
337
+ df = df_with_datetime_col
338
+ gb = df.groupby(by=by)
339
+
340
+ if groupby_series:
341
+ gb = gb["d"]
342
+
343
+ klass, msg = {
344
+ np.sum: (TypeError, "datetime64 type does not support sum operations"),
345
+ np.mean: (None, ""),
346
+ }[groupby_func_np]
347
+
348
+ if groupby_series:
349
+ warn_msg = "using SeriesGroupBy.[sum|mean]"
350
+ else:
351
+ warn_msg = "using DataFrameGroupBy.[sum|mean]"
352
+ _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)
353
+
354
+
355
+ @pytest.mark.parametrize("func", ["prod", "cumprod", "skew", "var"])
356
+ def test_groupby_raises_timedelta(func, df_with_timedelta_col):
357
+ df = df_with_timedelta_col
358
+ gb = df.groupby(by="a")
359
+
360
+ _call_and_check(
361
+ TypeError,
362
+ "timedelta64 type does not support .* operations",
363
+ "method",
364
+ gb,
365
+ func,
366
+ [],
367
+ )
368
+
369
+
370
+ @pytest.mark.parametrize("how", ["method", "agg", "transform"])
371
+ def test_groupby_raises_category(
372
+ how, by, groupby_series, groupby_func, using_copy_on_write, df_with_cat_col
373
+ ):
374
+ # GH#50749
375
+ df = df_with_cat_col
376
+ args = get_groupby_method_args(groupby_func, df)
377
+ gb = df.groupby(by=by)
378
+
379
+ if groupby_series:
380
+ gb = gb["d"]
381
+
382
+ if groupby_func == "corrwith":
383
+ assert not hasattr(gb, "corrwith")
384
+ return
385
+
386
+ klass, msg = {
387
+ "all": (None, ""),
388
+ "any": (None, ""),
389
+ "bfill": (None, ""),
390
+ "corrwith": (
391
+ TypeError,
392
+ r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'",
393
+ ),
394
+ "count": (None, ""),
395
+ "cumcount": (None, ""),
396
+ "cummax": (
397
+ (NotImplementedError, TypeError),
398
+ "(category type does not support cummax operations|"
399
+ "category dtype not supported|"
400
+ "cummax is not supported for category dtype)",
401
+ ),
402
+ "cummin": (
403
+ (NotImplementedError, TypeError),
404
+ "(category type does not support cummin operations|"
405
+ "category dtype not supported|"
406
+ "cummin is not supported for category dtype)",
407
+ ),
408
+ "cumprod": (
409
+ (NotImplementedError, TypeError),
410
+ "(category type does not support cumprod operations|"
411
+ "category dtype not supported|"
412
+ "cumprod is not supported for category dtype)",
413
+ ),
414
+ "cumsum": (
415
+ (NotImplementedError, TypeError),
416
+ "(category type does not support cumsum operations|"
417
+ "category dtype not supported|"
418
+ "cumsum is not supported for category dtype)",
419
+ ),
420
+ "diff": (
421
+ TypeError,
422
+ r"unsupported operand type\(s\) for -: 'Categorical' and 'Categorical'",
423
+ ),
424
+ "ffill": (None, ""),
425
+ "fillna": (
426
+ TypeError,
427
+ r"Cannot setitem on a Categorical with a new category \(0\), "
428
+ "set the categories first",
429
+ )
430
+ if not using_copy_on_write
431
+ else (None, ""), # no-op with CoW
432
+ "first": (None, ""),
433
+ "idxmax": (None, ""),
434
+ "idxmin": (None, ""),
435
+ "last": (None, ""),
436
+ "max": (None, ""),
437
+ "mean": (
438
+ TypeError,
439
+ "|".join(
440
+ [
441
+ "'Categorical' .* does not support reduction 'mean'",
442
+ "category dtype does not support aggregation 'mean'",
443
+ ]
444
+ ),
445
+ ),
446
+ "median": (
447
+ TypeError,
448
+ "|".join(
449
+ [
450
+ "'Categorical' .* does not support reduction 'median'",
451
+ "category dtype does not support aggregation 'median'",
452
+ ]
453
+ ),
454
+ ),
455
+ "min": (None, ""),
456
+ "ngroup": (None, ""),
457
+ "nunique": (None, ""),
458
+ "pct_change": (
459
+ TypeError,
460
+ r"unsupported operand type\(s\) for /: 'Categorical' and 'Categorical'",
461
+ ),
462
+ "prod": (TypeError, "category type does not support prod operations"),
463
+ "quantile": (TypeError, "No matching signature found"),
464
+ "rank": (None, ""),
465
+ "sem": (
466
+ TypeError,
467
+ "|".join(
468
+ [
469
+ "'Categorical' .* does not support reduction 'sem'",
470
+ "category dtype does not support aggregation 'sem'",
471
+ ]
472
+ ),
473
+ ),
474
+ "shift": (None, ""),
475
+ "size": (None, ""),
476
+ "skew": (
477
+ TypeError,
478
+ "|".join(
479
+ [
480
+ "dtype category does not support reduction 'skew'",
481
+ "category type does not support skew operations",
482
+ ]
483
+ ),
484
+ ),
485
+ "std": (
486
+ TypeError,
487
+ "|".join(
488
+ [
489
+ "'Categorical' .* does not support reduction 'std'",
490
+ "category dtype does not support aggregation 'std'",
491
+ ]
492
+ ),
493
+ ),
494
+ "sum": (TypeError, "category type does not support sum operations"),
495
+ "var": (
496
+ TypeError,
497
+ "|".join(
498
+ [
499
+ "'Categorical' .* does not support reduction 'var'",
500
+ "category dtype does not support aggregation 'var'",
501
+ ]
502
+ ),
503
+ ),
504
+ }[groupby_func]
505
+
506
+ if groupby_func == "fillna":
507
+ kind = "Series" if groupby_series else "DataFrame"
508
+ warn_msg = f"{kind}GroupBy.fillna is deprecated"
509
+ else:
510
+ warn_msg = ""
511
+ _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)
512
+
513
+
514
+ @pytest.mark.parametrize("how", ["agg", "transform"])
515
+ def test_groupby_raises_category_udf(how, by, groupby_series, df_with_cat_col):
516
+ # GH#50749
517
+ df = df_with_cat_col
518
+ gb = df.groupby(by=by)
519
+
520
+ if groupby_series:
521
+ gb = gb["d"]
522
+
523
+ def func(x):
524
+ raise TypeError("Test error message")
525
+
526
+ with pytest.raises(TypeError, match="Test error message"):
527
+ getattr(gb, how)(func)
528
+
529
+
530
+ @pytest.mark.parametrize("how", ["agg", "transform"])
531
+ @pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean])
532
+ def test_groupby_raises_category_np(
533
+ how, by, groupby_series, groupby_func_np, df_with_cat_col
534
+ ):
535
+ # GH#50749
536
+ df = df_with_cat_col
537
+ gb = df.groupby(by=by)
538
+
539
+ if groupby_series:
540
+ gb = gb["d"]
541
+
542
+ klass, msg = {
543
+ np.sum: (TypeError, "category type does not support sum operations"),
544
+ np.mean: (
545
+ TypeError,
546
+ "category dtype does not support aggregation 'mean'",
547
+ ),
548
+ }[groupby_func_np]
549
+
550
+ if groupby_series:
551
+ warn_msg = "using SeriesGroupBy.[sum|mean]"
552
+ else:
553
+ warn_msg = "using DataFrameGroupBy.[sum|mean]"
554
+ _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg)
555
+
556
+
557
+ @pytest.mark.parametrize("how", ["method", "agg", "transform"])
558
+ def test_groupby_raises_category_on_category(
559
+ how,
560
+ by,
561
+ groupby_series,
562
+ groupby_func,
563
+ observed,
564
+ using_copy_on_write,
565
+ df_with_cat_col,
566
+ ):
567
+ # GH#50749
568
+ df = df_with_cat_col
569
+ df["a"] = Categorical(
570
+ ["a", "a", "a", "a", "b", "b", "b", "b", "c"],
571
+ categories=["a", "b", "c", "d"],
572
+ ordered=True,
573
+ )
574
+ args = get_groupby_method_args(groupby_func, df)
575
+ gb = df.groupby(by=by, observed=observed)
576
+
577
+ if groupby_series:
578
+ gb = gb["d"]
579
+
580
+ if groupby_func == "corrwith":
581
+ assert not hasattr(gb, "corrwith")
582
+ return
583
+
584
+ empty_groups = not observed and any(group.empty for group in gb.groups.values())
585
+ if (
586
+ not observed
587
+ and how != "transform"
588
+ and isinstance(by, list)
589
+ and isinstance(by[0], str)
590
+ and by == ["a", "b"]
591
+ ):
592
+ assert not empty_groups
593
+ # TODO: empty_groups should be true due to unobserved categorical combinations
594
+ empty_groups = True
595
+ if how == "transform":
596
+ # empty groups will be ignored
597
+ empty_groups = False
598
+
599
+ klass, msg = {
600
+ "all": (None, ""),
601
+ "any": (None, ""),
602
+ "bfill": (None, ""),
603
+ "corrwith": (
604
+ TypeError,
605
+ r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'",
606
+ ),
607
+ "count": (None, ""),
608
+ "cumcount": (None, ""),
609
+ "cummax": (
610
+ (NotImplementedError, TypeError),
611
+ "(cummax is not supported for category dtype|"
612
+ "category dtype not supported|"
613
+ "category type does not support cummax operations)",
614
+ ),
615
+ "cummin": (
616
+ (NotImplementedError, TypeError),
617
+ "(cummin is not supported for category dtype|"
618
+ "category dtype not supported|"
619
+ "category type does not support cummin operations)",
620
+ ),
621
+ "cumprod": (
622
+ (NotImplementedError, TypeError),
623
+ "(cumprod is not supported for category dtype|"
624
+ "category dtype not supported|"
625
+ "category type does not support cumprod operations)",
626
+ ),
627
+ "cumsum": (
628
+ (NotImplementedError, TypeError),
629
+ "(cumsum is not supported for category dtype|"
630
+ "category dtype not supported|"
631
+ "category type does not support cumsum operations)",
632
+ ),
633
+ "diff": (TypeError, "unsupported operand type"),
634
+ "ffill": (None, ""),
635
+ "fillna": (
636
+ TypeError,
637
+ r"Cannot setitem on a Categorical with a new category \(0\), "
638
+ "set the categories first",
639
+ )
640
+ if not using_copy_on_write
641
+ else (None, ""), # no-op with CoW
642
+ "first": (None, ""),
643
+ "idxmax": (ValueError, "empty group due to unobserved categories")
644
+ if empty_groups
645
+ else (None, ""),
646
+ "idxmin": (ValueError, "empty group due to unobserved categories")
647
+ if empty_groups
648
+ else (None, ""),
649
+ "last": (None, ""),
650
+ "max": (None, ""),
651
+ "mean": (TypeError, "category dtype does not support aggregation 'mean'"),
652
+ "median": (TypeError, "category dtype does not support aggregation 'median'"),
653
+ "min": (None, ""),
654
+ "ngroup": (None, ""),
655
+ "nunique": (None, ""),
656
+ "pct_change": (TypeError, "unsupported operand type"),
657
+ "prod": (TypeError, "category type does not support prod operations"),
658
+ "quantile": (TypeError, ""),
659
+ "rank": (None, ""),
660
+ "sem": (
661
+ TypeError,
662
+ "|".join(
663
+ [
664
+ "'Categorical' .* does not support reduction 'sem'",
665
+ "category dtype does not support aggregation 'sem'",
666
+ ]
667
+ ),
668
+ ),
669
+ "shift": (None, ""),
670
+ "size": (None, ""),
671
+ "skew": (
672
+ TypeError,
673
+ "|".join(
674
+ [
675
+ "category type does not support skew operations",
676
+ "dtype category does not support reduction 'skew'",
677
+ ]
678
+ ),
679
+ ),
680
+ "std": (
681
+ TypeError,
682
+ "|".join(
683
+ [
684
+ "'Categorical' .* does not support reduction 'std'",
685
+ "category dtype does not support aggregation 'std'",
686
+ ]
687
+ ),
688
+ ),
689
+ "sum": (TypeError, "category type does not support sum operations"),
690
+ "var": (
691
+ TypeError,
692
+ "|".join(
693
+ [
694
+ "'Categorical' .* does not support reduction 'var'",
695
+ "category dtype does not support aggregation 'var'",
696
+ ]
697
+ ),
698
+ ),
699
+ }[groupby_func]
700
+
701
+ if groupby_func == "fillna":
702
+ kind = "Series" if groupby_series else "DataFrame"
703
+ warn_msg = f"{kind}GroupBy.fillna is deprecated"
704
+ else:
705
+ warn_msg = ""
706
+ _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg)
707
+
708
+
709
+ def test_subsetting_columns_axis_1_raises():
710
+ # GH 35443
711
+ df = DataFrame({"a": [1], "b": [2], "c": [3]})
712
+ msg = "DataFrame.groupby with axis=1 is deprecated"
713
+ with tm.assert_produces_warning(FutureWarning, match=msg):
714
+ gb = df.groupby("a", axis=1)
715
+ with pytest.raises(ValueError, match="Cannot subset columns when using axis=1"):
716
+ gb["b"]
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_reductions.py ADDED
@@ -0,0 +1,1176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import builtins
2
+ import datetime as dt
3
+ from string import ascii_lowercase
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from pandas._libs.tslibs import iNaT
9
+
10
+ from pandas.core.dtypes.common import pandas_dtype
11
+ from pandas.core.dtypes.missing import na_value_for_dtype
12
+
13
+ import pandas as pd
14
+ from pandas import (
15
+ DataFrame,
16
+ MultiIndex,
17
+ Series,
18
+ Timestamp,
19
+ date_range,
20
+ isna,
21
+ )
22
+ import pandas._testing as tm
23
+ from pandas.util import _test_decorators as td
24
+
25
+
26
+ @pytest.mark.parametrize("agg_func", ["any", "all"])
27
+ @pytest.mark.parametrize(
28
+ "vals",
29
+ [
30
+ ["foo", "bar", "baz"],
31
+ ["foo", "", ""],
32
+ ["", "", ""],
33
+ [1, 2, 3],
34
+ [1, 0, 0],
35
+ [0, 0, 0],
36
+ [1.0, 2.0, 3.0],
37
+ [1.0, 0.0, 0.0],
38
+ [0.0, 0.0, 0.0],
39
+ [True, True, True],
40
+ [True, False, False],
41
+ [False, False, False],
42
+ [np.nan, np.nan, np.nan],
43
+ ],
44
+ )
45
+ def test_groupby_bool_aggs(skipna, agg_func, vals):
46
+ df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
47
+
48
+ # Figure out expectation using Python builtin
49
+ exp = getattr(builtins, agg_func)(vals)
50
+
51
+ # edge case for missing data with skipna and 'any'
52
+ if skipna and all(isna(vals)) and agg_func == "any":
53
+ exp = False
54
+
55
+ expected = DataFrame(
56
+ [exp] * 2, columns=["val"], index=pd.Index(["a", "b"], name="key")
57
+ )
58
+ result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
59
+ tm.assert_frame_equal(result, expected)
60
+
61
+
62
+ def test_any():
63
+ df = DataFrame(
64
+ [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]],
65
+ columns=["A", "B", "C"],
66
+ )
67
+ expected = DataFrame(
68
+ [[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
69
+ )
70
+ expected.index.name = "A"
71
+ result = df.groupby("A").any()
72
+ tm.assert_frame_equal(result, expected)
73
+
74
+
75
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
76
+ def test_bool_aggs_dup_column_labels(bool_agg_func):
77
+ # GH#21668
78
+ df = DataFrame([[True, True]], columns=["a", "a"])
79
+ grp_by = df.groupby([0])
80
+ result = getattr(grp_by, bool_agg_func)()
81
+
82
+ expected = df.set_axis(np.array([0]))
83
+ tm.assert_frame_equal(result, expected)
84
+
85
+
86
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
87
+ @pytest.mark.parametrize(
88
+ "data",
89
+ [
90
+ [False, False, False],
91
+ [True, True, True],
92
+ [pd.NA, pd.NA, pd.NA],
93
+ [False, pd.NA, False],
94
+ [True, pd.NA, True],
95
+ [True, pd.NA, False],
96
+ ],
97
+ )
98
+ def test_masked_kleene_logic(bool_agg_func, skipna, data):
99
+ # GH#37506
100
+ ser = Series(data, dtype="boolean")
101
+
102
+ # The result should match aggregating on the whole series. Correctness
103
+ # there is verified in test_reductions.py::test_any_all_boolean_kleene_logic
104
+ expected_data = getattr(ser, bool_agg_func)(skipna=skipna)
105
+ expected = Series(expected_data, index=np.array([0]), dtype="boolean")
106
+
107
+ result = ser.groupby([0, 0, 0]).agg(bool_agg_func, skipna=skipna)
108
+ tm.assert_series_equal(result, expected)
109
+
110
+
111
+ @pytest.mark.parametrize(
112
+ "dtype1,dtype2,exp_col1,exp_col2",
113
+ [
114
+ (
115
+ "float",
116
+ "Float64",
117
+ np.array([True], dtype=bool),
118
+ pd.array([pd.NA], dtype="boolean"),
119
+ ),
120
+ (
121
+ "Int64",
122
+ "float",
123
+ pd.array([pd.NA], dtype="boolean"),
124
+ np.array([True], dtype=bool),
125
+ ),
126
+ (
127
+ "Int64",
128
+ "Int64",
129
+ pd.array([pd.NA], dtype="boolean"),
130
+ pd.array([pd.NA], dtype="boolean"),
131
+ ),
132
+ (
133
+ "Float64",
134
+ "boolean",
135
+ pd.array([pd.NA], dtype="boolean"),
136
+ pd.array([pd.NA], dtype="boolean"),
137
+ ),
138
+ ],
139
+ )
140
+ def test_masked_mixed_types(dtype1, dtype2, exp_col1, exp_col2):
141
+ # GH#37506
142
+ data = [1.0, np.nan]
143
+ df = DataFrame(
144
+ {"col1": pd.array(data, dtype=dtype1), "col2": pd.array(data, dtype=dtype2)}
145
+ )
146
+ result = df.groupby([1, 1]).agg("all", skipna=False)
147
+
148
+ expected = DataFrame({"col1": exp_col1, "col2": exp_col2}, index=np.array([1]))
149
+ tm.assert_frame_equal(result, expected)
150
+
151
+
152
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
153
+ @pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"])
154
+ def test_masked_bool_aggs_skipna(bool_agg_func, dtype, skipna, frame_or_series):
155
+ # GH#40585
156
+ obj = frame_or_series([pd.NA, 1], dtype=dtype)
157
+ expected_res = True
158
+ if not skipna and bool_agg_func == "all":
159
+ expected_res = pd.NA
160
+ expected = frame_or_series([expected_res], index=np.array([1]), dtype="boolean")
161
+
162
+ result = obj.groupby([1, 1]).agg(bool_agg_func, skipna=skipna)
163
+ tm.assert_equal(result, expected)
164
+
165
+
166
+ @pytest.mark.parametrize(
167
+ "bool_agg_func,data,expected_res",
168
+ [
169
+ ("any", [pd.NA, np.nan], False),
170
+ ("any", [pd.NA, 1, np.nan], True),
171
+ ("all", [pd.NA, pd.NaT], True),
172
+ ("all", [pd.NA, False, pd.NaT], False),
173
+ ],
174
+ )
175
+ def test_object_type_missing_vals(bool_agg_func, data, expected_res, frame_or_series):
176
+ # GH#37501
177
+ obj = frame_or_series(data, dtype=object)
178
+ result = obj.groupby([1] * len(data)).agg(bool_agg_func)
179
+ expected = frame_or_series([expected_res], index=np.array([1]), dtype="bool")
180
+ tm.assert_equal(result, expected)
181
+
182
+
183
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
184
+ def test_object_NA_raises_with_skipna_false(bool_agg_func):
185
+ # GH#37501
186
+ ser = Series([pd.NA], dtype=object)
187
+ with pytest.raises(TypeError, match="boolean value of NA is ambiguous"):
188
+ ser.groupby([1]).agg(bool_agg_func, skipna=False)
189
+
190
+
191
+ @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
192
+ def test_empty(frame_or_series, bool_agg_func):
193
+ # GH 45231
194
+ kwargs = {"columns": ["a"]} if frame_or_series is DataFrame else {"name": "a"}
195
+ obj = frame_or_series(**kwargs, dtype=object)
196
+ result = getattr(obj.groupby(obj.index), bool_agg_func)()
197
+ expected = frame_or_series(**kwargs, dtype=bool)
198
+ tm.assert_equal(result, expected)
199
+
200
+
201
+ @pytest.mark.parametrize("how", ["idxmin", "idxmax"])
202
+ def test_idxmin_idxmax_extremes(how, any_real_numpy_dtype):
203
+ # GH#57040
204
+ if any_real_numpy_dtype is int or any_real_numpy_dtype is float:
205
+ # No need to test
206
+ return
207
+ info = np.iinfo if "int" in any_real_numpy_dtype else np.finfo
208
+ min_value = info(any_real_numpy_dtype).min
209
+ max_value = info(any_real_numpy_dtype).max
210
+ df = DataFrame(
211
+ {"a": [2, 1, 1, 2], "b": [min_value, max_value, max_value, min_value]},
212
+ dtype=any_real_numpy_dtype,
213
+ )
214
+ gb = df.groupby("a")
215
+ result = getattr(gb, how)()
216
+ expected = DataFrame(
217
+ {"b": [1, 0]}, index=pd.Index([1, 2], name="a", dtype=any_real_numpy_dtype)
218
+ )
219
+ tm.assert_frame_equal(result, expected)
220
+
221
+
222
+ @pytest.mark.parametrize("how", ["idxmin", "idxmax"])
223
+ def test_idxmin_idxmax_extremes_skipna(skipna, how, float_numpy_dtype):
224
+ # GH#57040
225
+ min_value = np.finfo(float_numpy_dtype).min
226
+ max_value = np.finfo(float_numpy_dtype).max
227
+ df = DataFrame(
228
+ {
229
+ "a": Series(np.repeat(range(1, 6), repeats=2), dtype="intp"),
230
+ "b": Series(
231
+ [
232
+ np.nan,
233
+ min_value,
234
+ np.nan,
235
+ max_value,
236
+ min_value,
237
+ np.nan,
238
+ max_value,
239
+ np.nan,
240
+ np.nan,
241
+ np.nan,
242
+ ],
243
+ dtype=float_numpy_dtype,
244
+ ),
245
+ },
246
+ )
247
+ gb = df.groupby("a")
248
+
249
+ warn = None if skipna else FutureWarning
250
+ msg = f"The behavior of DataFrameGroupBy.{how} with all-NA values"
251
+ with tm.assert_produces_warning(warn, match=msg):
252
+ result = getattr(gb, how)(skipna=skipna)
253
+ if skipna:
254
+ values = [1, 3, 4, 6, np.nan]
255
+ else:
256
+ values = np.nan
257
+ expected = DataFrame(
258
+ {"b": values}, index=pd.Index(range(1, 6), name="a", dtype="intp")
259
+ )
260
+ tm.assert_frame_equal(result, expected)
261
+
262
+
263
+ @pytest.mark.parametrize(
264
+ "func, values",
265
+ [
266
+ ("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
267
+ ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
268
+ ],
269
+ )
270
+ @pytest.mark.parametrize("numeric_only", [True, False])
271
+ def test_idxmin_idxmax_returns_int_types(func, values, numeric_only):
272
+ # GH 25444
273
+ df = DataFrame(
274
+ {
275
+ "name": ["A", "A", "B", "B"],
276
+ "c_int": [1, 2, 3, 4],
277
+ "c_float": [4.02, 3.03, 2.04, 1.05],
278
+ "c_date": ["2019", "2018", "2016", "2017"],
279
+ }
280
+ )
281
+ df["c_date"] = pd.to_datetime(df["c_date"])
282
+ df["c_date_tz"] = df["c_date"].dt.tz_localize("US/Pacific")
283
+ df["c_timedelta"] = df["c_date"] - df["c_date"].iloc[0]
284
+ df["c_period"] = df["c_date"].dt.to_period("W")
285
+ df["c_Integer"] = df["c_int"].astype("Int64")
286
+ df["c_Floating"] = df["c_float"].astype("Float64")
287
+
288
+ result = getattr(df.groupby("name"), func)(numeric_only=numeric_only)
289
+
290
+ expected = DataFrame(values, index=pd.Index(["A", "B"], name="name"))
291
+ if numeric_only:
292
+ expected = expected.drop(columns=["c_date"])
293
+ else:
294
+ expected["c_date_tz"] = expected["c_date"]
295
+ expected["c_timedelta"] = expected["c_date"]
296
+ expected["c_period"] = expected["c_date"]
297
+ expected["c_Integer"] = expected["c_int"]
298
+ expected["c_Floating"] = expected["c_float"]
299
+
300
+ tm.assert_frame_equal(result, expected)
301
+
302
+
303
+ @pytest.mark.parametrize(
304
+ "data",
305
+ [
306
+ (
307
+ Timestamp("2011-01-15 12:50:28.502376"),
308
+ Timestamp("2011-01-20 12:50:28.593448"),
309
+ ),
310
+ (24650000000000001, 24650000000000002),
311
+ ],
312
+ )
313
+ @pytest.mark.parametrize("method", ["count", "min", "max", "first", "last"])
314
+ def test_groupby_non_arithmetic_agg_int_like_precision(method, data):
315
+ # GH#6620, GH#9311
316
+ df = DataFrame({"a": [1, 1], "b": data})
317
+
318
+ grouped = df.groupby("a")
319
+ result = getattr(grouped, method)()
320
+ if method == "count":
321
+ expected_value = 2
322
+ elif method == "first":
323
+ expected_value = data[0]
324
+ elif method == "last":
325
+ expected_value = data[1]
326
+ else:
327
+ expected_value = getattr(df["b"], method)()
328
+ expected = DataFrame({"b": [expected_value]}, index=pd.Index([1], name="a"))
329
+
330
+ tm.assert_frame_equal(result, expected)
331
+
332
+
333
+ @pytest.mark.parametrize("how", ["first", "last"])
334
+ def test_first_last_skipna(any_real_nullable_dtype, sort, skipna, how):
335
+ # GH#57019
336
+ na_value = na_value_for_dtype(pandas_dtype(any_real_nullable_dtype))
337
+ df = DataFrame(
338
+ {
339
+ "a": [2, 1, 1, 2, 3, 3],
340
+ "b": [na_value, 3.0, na_value, 4.0, np.nan, np.nan],
341
+ "c": [na_value, 3.0, na_value, 4.0, np.nan, np.nan],
342
+ },
343
+ dtype=any_real_nullable_dtype,
344
+ )
345
+ gb = df.groupby("a", sort=sort)
346
+ method = getattr(gb, how)
347
+ result = method(skipna=skipna)
348
+
349
+ ilocs = {
350
+ ("first", True): [3, 1, 4],
351
+ ("first", False): [0, 1, 4],
352
+ ("last", True): [3, 1, 5],
353
+ ("last", False): [3, 2, 5],
354
+ }[how, skipna]
355
+ expected = df.iloc[ilocs].set_index("a")
356
+ if sort:
357
+ expected = expected.sort_index()
358
+ tm.assert_frame_equal(result, expected)
359
+
360
+
361
+ def test_idxmin_idxmax_axis1():
362
+ df = DataFrame(
363
+ np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "B", "C", "D"]
364
+ )
365
+ df["A"] = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]
366
+
367
+ gb = df.groupby("A")
368
+
369
+ warn_msg = "DataFrameGroupBy.idxmax with axis=1 is deprecated"
370
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
371
+ res = gb.idxmax(axis=1)
372
+
373
+ alt = df.iloc[:, 1:].idxmax(axis=1)
374
+ indexer = res.index.get_level_values(1)
375
+
376
+ tm.assert_series_equal(alt[indexer], res.droplevel("A"))
377
+
378
+ df["E"] = date_range("2016-01-01", periods=10)
379
+ gb2 = df.groupby("A")
380
+
381
+ msg = "'>' not supported between instances of 'Timestamp' and 'float'"
382
+ with pytest.raises(TypeError, match=msg):
383
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
384
+ gb2.idxmax(axis=1)
385
+
386
+
387
+ def test_groupby_mean_no_overflow():
388
+ # Regression test for (#22487)
389
+ df = DataFrame(
390
+ {
391
+ "user": ["A", "A", "A", "A", "A"],
392
+ "connections": [4970, 4749, 4719, 4704, 18446744073699999744],
393
+ }
394
+ )
395
+ assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840
396
+
397
+
398
+ def test_mean_on_timedelta():
399
+ # GH 17382
400
+ df = DataFrame({"time": pd.to_timedelta(range(10)), "cat": ["A", "B"] * 5})
401
+ result = df.groupby("cat")["time"].mean()
402
+ expected = Series(
403
+ pd.to_timedelta([4, 5]), name="time", index=pd.Index(["A", "B"], name="cat")
404
+ )
405
+ tm.assert_series_equal(result, expected)
406
+
407
+
408
+ def test_cython_median():
409
+ arr = np.random.default_rng(2).standard_normal(1000)
410
+ arr[::2] = np.nan
411
+ df = DataFrame(arr)
412
+
413
+ labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)
414
+ labels[::17] = np.nan
415
+
416
+ result = df.groupby(labels).median()
417
+ msg = "using DataFrameGroupBy.median"
418
+ with tm.assert_produces_warning(FutureWarning, match=msg):
419
+ exp = df.groupby(labels).agg(np.nanmedian)
420
+ tm.assert_frame_equal(result, exp)
421
+
422
+ df = DataFrame(np.random.default_rng(2).standard_normal((1000, 5)))
423
+ msg = "using DataFrameGroupBy.median"
424
+ with tm.assert_produces_warning(FutureWarning, match=msg):
425
+ rs = df.groupby(labels).agg(np.median)
426
+ xp = df.groupby(labels).median()
427
+ tm.assert_frame_equal(rs, xp)
428
+
429
+
430
+ def test_median_empty_bins(observed):
431
+ df = DataFrame(np.random.default_rng(2).integers(0, 44, 500))
432
+
433
+ grps = range(0, 55, 5)
434
+ bins = pd.cut(df[0], grps)
435
+
436
+ result = df.groupby(bins, observed=observed).median()
437
+ expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
438
+ tm.assert_frame_equal(result, expected)
439
+
440
+
441
+ def test_max_min_non_numeric():
442
+ # #2700
443
+ aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
444
+
445
+ result = aa.groupby("nn").max()
446
+ assert "ss" in result
447
+
448
+ result = aa.groupby("nn").max(numeric_only=False)
449
+ assert "ss" in result
450
+
451
+ result = aa.groupby("nn").min()
452
+ assert "ss" in result
453
+
454
+ result = aa.groupby("nn").min(numeric_only=False)
455
+ assert "ss" in result
456
+
457
+
458
+ def test_max_min_object_multiple_columns(using_array_manager):
459
+ # GH#41111 case where the aggregation is valid for some columns but not
460
+ # others; we split object blocks column-wise, consistent with
461
+ # DataFrame._reduce
462
+
463
+ df = DataFrame(
464
+ {
465
+ "A": [1, 1, 2, 2, 3],
466
+ "B": [1, "foo", 2, "bar", False],
467
+ "C": ["a", "b", "c", "d", "e"],
468
+ }
469
+ )
470
+ df._consolidate_inplace() # should already be consolidate, but double-check
471
+ if not using_array_manager:
472
+ assert len(df._mgr.blocks) == 2
473
+
474
+ gb = df.groupby("A")
475
+
476
+ result = gb[["C"]].max()
477
+ # "max" is valid for column "C" but not for "B"
478
+ ei = pd.Index([1, 2, 3], name="A")
479
+ expected = DataFrame({"C": ["b", "d", "e"]}, index=ei)
480
+ tm.assert_frame_equal(result, expected)
481
+
482
+ result = gb[["C"]].min()
483
+ # "min" is valid for column "C" but not for "B"
484
+ ei = pd.Index([1, 2, 3], name="A")
485
+ expected = DataFrame({"C": ["a", "c", "e"]}, index=ei)
486
+ tm.assert_frame_equal(result, expected)
487
+
488
+
489
+ def test_min_date_with_nans():
490
+ # GH26321
491
+ dates = pd.to_datetime(
492
+ Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
493
+ ).dt.date
494
+ df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
495
+
496
+ result = df.groupby("b", as_index=False)["c"].min()["c"]
497
+ expected = pd.to_datetime(
498
+ Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
499
+ ).dt.date
500
+ tm.assert_series_equal(result, expected)
501
+
502
+ result = df.groupby("b")["c"].min()
503
+ expected.index.name = "b"
504
+ tm.assert_series_equal(result, expected)
505
+
506
+
507
+ def test_max_inat():
508
+ # GH#40767 dont interpret iNaT as NaN
509
+ ser = Series([1, iNaT])
510
+ key = np.array([1, 1], dtype=np.int64)
511
+ gb = ser.groupby(key)
512
+
513
+ result = gb.max(min_count=2)
514
+ expected = Series({1: 1}, dtype=np.int64)
515
+ tm.assert_series_equal(result, expected, check_exact=True)
516
+
517
+ result = gb.min(min_count=2)
518
+ expected = Series({1: iNaT}, dtype=np.int64)
519
+ tm.assert_series_equal(result, expected, check_exact=True)
520
+
521
+ # not enough entries -> gets masked to NaN
522
+ result = gb.min(min_count=3)
523
+ expected = Series({1: np.nan})
524
+ tm.assert_series_equal(result, expected, check_exact=True)
525
+
526
+
527
+ def test_max_inat_not_all_na():
528
+ # GH#40767 dont interpret iNaT as NaN
529
+
530
+ # make sure we dont round iNaT+1 to iNaT
531
+ ser = Series([1, iNaT, 2, iNaT + 1])
532
+ gb = ser.groupby([1, 2, 3, 3])
533
+ result = gb.min(min_count=2)
534
+
535
+ # Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. is lossy
536
+ expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1})
537
+ expected.index = expected.index.astype(int)
538
+ tm.assert_series_equal(result, expected, check_exact=True)
539
+
540
+
541
+ @pytest.mark.parametrize("func", ["min", "max"])
542
+ def test_groupby_aggregate_period_column(func):
543
+ # GH 31471
544
+ groups = [1, 2]
545
+ periods = pd.period_range("2020", periods=2, freq="Y")
546
+ df = DataFrame({"a": groups, "b": periods})
547
+
548
+ result = getattr(df.groupby("a")["b"], func)()
549
+ idx = pd.Index([1, 2], name="a")
550
+ expected = Series(periods, index=idx, name="b")
551
+
552
+ tm.assert_series_equal(result, expected)
553
+
554
+
555
+ @pytest.mark.parametrize("func", ["min", "max"])
556
+ def test_groupby_aggregate_period_frame(func):
557
+ # GH 31471
558
+ groups = [1, 2]
559
+ periods = pd.period_range("2020", periods=2, freq="Y")
560
+ df = DataFrame({"a": groups, "b": periods})
561
+
562
+ result = getattr(df.groupby("a"), func)()
563
+ idx = pd.Index([1, 2], name="a")
564
+ expected = DataFrame({"b": periods}, index=idx)
565
+
566
+ tm.assert_frame_equal(result, expected)
567
+
568
+
569
+ def test_aggregate_numeric_object_dtype():
570
+ # https://github.com/pandas-dev/pandas/issues/39329
571
+ # simplified case: multiple object columns where one is all-NaN
572
+ # -> gets split as the all-NaN is inferred as float
573
+ df = DataFrame(
574
+ {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4},
575
+ ).astype(object)
576
+ result = df.groupby("key").min()
577
+ expected = (
578
+ DataFrame(
579
+ {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]},
580
+ )
581
+ .set_index("key")
582
+ .astype(object)
583
+ )
584
+ tm.assert_frame_equal(result, expected)
585
+
586
+ # same but with numbers
587
+ df = DataFrame(
588
+ {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)},
589
+ ).astype(object)
590
+ result = df.groupby("key").min()
591
+ expected = (
592
+ DataFrame({"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]})
593
+ .set_index("key")
594
+ .astype(object)
595
+ )
596
+ tm.assert_frame_equal(result, expected)
597
+
598
+
599
+ @pytest.mark.parametrize("func", ["min", "max"])
600
+ def test_aggregate_categorical_lost_index(func: str):
601
+ # GH: 28641 groupby drops index, when grouping over categorical column with min/max
602
+ ds = Series(["b"], dtype="category").cat.as_ordered()
603
+ df = DataFrame({"A": [1997], "B": ds})
604
+ result = df.groupby("A").agg({"B": func})
605
+ expected = DataFrame({"B": ["b"]}, index=pd.Index([1997], name="A"))
606
+
607
+ # ordered categorical dtype should be preserved
608
+ expected["B"] = expected["B"].astype(ds.dtype)
609
+
610
+ tm.assert_frame_equal(result, expected)
611
+
612
+
613
+ @pytest.mark.parametrize("dtype", ["Int64", "Int32", "Float64", "Float32", "boolean"])
614
+ def test_groupby_min_max_nullable(dtype):
615
+ if dtype == "Int64":
616
+ # GH#41743 avoid precision loss
617
+ ts = 1618556707013635762
618
+ elif dtype == "boolean":
619
+ ts = 0
620
+ else:
621
+ ts = 4.0
622
+
623
+ df = DataFrame({"id": [2, 2], "ts": [ts, ts + 1]})
624
+ df["ts"] = df["ts"].astype(dtype)
625
+
626
+ gb = df.groupby("id")
627
+
628
+ result = gb.min()
629
+ expected = df.iloc[:1].set_index("id")
630
+ tm.assert_frame_equal(result, expected)
631
+
632
+ res_max = gb.max()
633
+ expected_max = df.iloc[1:].set_index("id")
634
+ tm.assert_frame_equal(res_max, expected_max)
635
+
636
+ result2 = gb.min(min_count=3)
637
+ expected2 = DataFrame({"ts": [pd.NA]}, index=expected.index, dtype=dtype)
638
+ tm.assert_frame_equal(result2, expected2)
639
+
640
+ res_max2 = gb.max(min_count=3)
641
+ tm.assert_frame_equal(res_max2, expected2)
642
+
643
+ # Case with NA values
644
+ df2 = DataFrame({"id": [2, 2, 2], "ts": [ts, pd.NA, ts + 1]})
645
+ df2["ts"] = df2["ts"].astype(dtype)
646
+ gb2 = df2.groupby("id")
647
+
648
+ result3 = gb2.min()
649
+ tm.assert_frame_equal(result3, expected)
650
+
651
+ res_max3 = gb2.max()
652
+ tm.assert_frame_equal(res_max3, expected_max)
653
+
654
+ result4 = gb2.min(min_count=100)
655
+ tm.assert_frame_equal(result4, expected2)
656
+
657
+ res_max4 = gb2.max(min_count=100)
658
+ tm.assert_frame_equal(res_max4, expected2)
659
+
660
+
661
+ def test_min_max_nullable_uint64_empty_group():
662
+ # don't raise NotImplementedError from libgroupby
663
+ cat = pd.Categorical([0] * 10, categories=[0, 1])
664
+ df = DataFrame({"A": cat, "B": pd.array(np.arange(10, dtype=np.uint64))})
665
+ gb = df.groupby("A", observed=False)
666
+
667
+ res = gb.min()
668
+
669
+ idx = pd.CategoricalIndex([0, 1], dtype=cat.dtype, name="A")
670
+ expected = DataFrame({"B": pd.array([0, pd.NA], dtype="UInt64")}, index=idx)
671
+ tm.assert_frame_equal(res, expected)
672
+
673
+ res = gb.max()
674
+ expected.iloc[0, 0] = 9
675
+ tm.assert_frame_equal(res, expected)
676
+
677
+
678
@pytest.mark.parametrize("func", ["first", "last", "min", "max"])
def test_groupby_min_max_categorical(func):
    # GH: 52151 — reductions over unobserved categorical groups keep the
    # categorical dtype and fill the unobserved rows with missing values.
    df = DataFrame(
        {
            "col1": pd.Categorical(["A"], categories=list("AB"), ordered=True),
            "col2": pd.Categorical([1], categories=[1, 2], ordered=True),
            "value": 0.1,
        }
    )
    result = getattr(df.groupby("col1", observed=False), func)()

    expected_index = pd.CategoricalIndex(data=["A", "B"], name="col1", ordered=True)
    expected = DataFrame(
        {
            "col2": pd.Categorical([1, None], categories=[1, 2], ordered=True),
            "value": [0.1, None],
        },
        index=expected_index,
    )
    tm.assert_frame_equal(result, expected)
699
+
700
+
701
@pytest.mark.parametrize("func", ["min", "max"])
def test_min_empty_string_dtype(func):
    # GH#55619 — min/max on an empty pyarrow-backed string frame keeps
    # the string dtype in both the result columns and the index.
    pytest.importorskip("pyarrow")
    dtype = "string[pyarrow_numpy]"
    df = DataFrame({"a": ["a"], "b": "a", "c": "a"}, dtype=dtype).iloc[:0]
    grouped = df.groupby("a")
    result = getattr(grouped, func)()
    expected = DataFrame(
        columns=["b", "c"], dtype=dtype, index=pd.Index([], dtype=dtype, name="a")
    )
    tm.assert_frame_equal(result, expected)
712
+
713
+
714
def test_max_nan_bug():
    # Object-dtype max on a column subset must match the Series-wise
    # reduction and must not introduce spurious NaNs.
    df = DataFrame(
        {
            "Unnamed: 0": ["-04-23", "-05-06", "-05-07"],
            "Date": [
                "2013-04-23 00:00:00",
                "2013-05-06 00:00:00",
                "2013-05-07 00:00:00",
            ],
            "app": Series([np.nan, np.nan, "OE"]),
            "File": ["log080001.log", "log.log", "xlsx"],
        }
    )
    grouped = df.groupby("Date")
    result = grouped[["File"]].max()
    expected = grouped["File"].max().to_frame()
    tm.assert_frame_equal(result, expected)
    assert not result["File"].isna().any()
732
+
733
+
734
@pytest.mark.slow
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("dropna", [False, True])
@pytest.mark.parametrize("as_index", [True, False])
@pytest.mark.parametrize("with_nan", [True, False])
@pytest.mark.parametrize("keys", [["joe"], ["joe", "jim"]])
def test_series_groupby_nunique(sort, dropna, as_index, with_nan, keys):
    # The cython nunique path must agree with applying Series.nunique
    # group-by-group, and must not mutate the input frame.
    nrows = 100
    nvalues = 10
    days = date_range("2015-08-23", periods=10)
    df = DataFrame(
        {
            "jim": np.random.default_rng(2).choice(list(ascii_lowercase), nrows),
            "joe": np.random.default_rng(2).choice(days, nrows),
            "julie": np.random.default_rng(2).integers(0, nvalues, nrows),
        }
    )
    if with_nan:
        # cast first so assigning None below does not upcast implicitly
        df = df.astype({"julie": float})
        df.loc[1::17, "jim"] = None
        df.loc[3::37, "joe"] = None
        df.loc[7::19, "julie"] = None
        df.loc[8::19, "julie"] = None
        df.loc[9::19, "julie"] = None
    original_df = df.copy()

    left = df.groupby(keys, as_index=as_index, sort=sort)["julie"].nunique(
        dropna=dropna
    )

    right = df.groupby(keys, as_index=as_index, sort=sort)["julie"].apply(
        Series.nunique, dropna=dropna
    )
    if not as_index:
        right = right.reset_index(drop=True)

    if as_index:
        tm.assert_series_equal(left, right, check_names=False)
    else:
        tm.assert_frame_equal(left, right, check_names=False)
    # the groupby must not have modified the source frame
    tm.assert_frame_equal(df, original_df)
772
+
773
+
774
def test_nunique():
    df = DataFrame({"A": list("abbacc"), "B": list("abxacc"), "C": list("abbacx")})

    # as_index=False keeps the grouping key as a regular column
    expected = DataFrame({"A": list("abc"), "B": [1, 2, 1], "C": [1, 1, 2]})
    tm.assert_frame_equal(df.groupby("A", as_index=False).nunique(), expected)

    # as_index=True moves the key into the index
    expected.index = list("abc")
    expected.index.name = "A"
    expected = expected.drop(columns="A")
    tm.assert_frame_equal(df.groupby("A").nunique(), expected)

    # dropna=False: a missing value counts as one distinct value, so the
    # counts are unchanged after replacing "x" with None
    result = df.replace({"x": None}).groupby("A").nunique(dropna=False)
    tm.assert_frame_equal(result, expected)

    # default dropna=True ignores the missing values entirely
    expected = DataFrame({"B": [1] * 3, "C": [1] * 3}, index=list("abc"))
    expected.index.name = "A"
    result = df.replace({"x": None}).groupby("A").nunique()
    tm.assert_frame_equal(result, expected)
797
+
798
+
799
def test_nunique_with_object():
    # GH 11077 — nunique on an object column grouped by multiple keys
    data = DataFrame(
        [
            [100, 1, "Alice"],
            [200, 2, "Bob"],
            [300, 3, "Charlie"],
            [-400, 4, "Dan"],
            [500, 5, "Edith"],
        ],
        columns=["amount", "id", "name"],
    )

    result = data.groupby(["id", "amount"])["name"].nunique()
    expected = Series(
        [1] * 5,
        name="name",
        index=MultiIndex.from_arrays([data.id, data.amount]),
    )
    tm.assert_series_equal(result, expected)
816
+
817
+
818
def test_nunique_with_empty_series():
    # GH 12553 — empty input yields an empty int64 result, not an error
    ser = Series(name="name", dtype=object)
    result = ser.groupby(level=0).nunique()
    expected = Series(name="name", dtype="int64")
    tm.assert_series_equal(result, expected)
824
+
825
+
826
def test_nunique_with_timegrouper():
    # GH 13453 — nunique through a time Grouper matches the apply path
    frame = DataFrame(
        {
            "time": [
                Timestamp("2016-06-28 09:35:35"),
                Timestamp("2016-06-28 16:09:30"),
                Timestamp("2016-06-28 16:46:28"),
            ],
            "data": ["1", "2", "3"],
        }
    ).set_index("time")
    result = frame.groupby(pd.Grouper(freq="h"))["data"].nunique()
    expected = frame.groupby(pd.Grouper(freq="h"))["data"].apply(Series.nunique)
    tm.assert_series_equal(result, expected)
841
+
842
+
843
@pytest.mark.parametrize(
    "key, data, dropna, expected",
    [
        (
            ["x", "x", "x"],
            [Timestamp("2019-01-01"), pd.NaT, Timestamp("2019-01-01")],
            True,
            Series([1], index=pd.Index(["x"], name="key"), name="data"),
        ),
        (
            ["x", "x", "x"],
            [dt.date(2019, 1, 1), pd.NaT, dt.date(2019, 1, 1)],
            True,
            Series([1], index=pd.Index(["x"], name="key"), name="data"),
        ),
        (
            ["x", "x", "x", "y", "y"],
            [
                dt.date(2019, 1, 1),
                pd.NaT,
                dt.date(2019, 1, 1),
                pd.NaT,
                dt.date(2019, 1, 1),
            ],
            False,
            Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"),
        ),
        (
            ["x", "x", "x", "x", "y"],
            [
                dt.date(2019, 1, 1),
                pd.NaT,
                dt.date(2019, 1, 1),
                pd.NaT,
                dt.date(2019, 1, 1),
            ],
            False,
            Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"),
        ),
    ],
)
def test_nunique_with_NaT(key, data, dropna, expected):
    # GH 27951 — NaT counts as a distinct value only when dropna=False
    frame = DataFrame({"key": key, "data": data})
    result = frame.groupby(["key"])["data"].nunique(dropna=dropna)
    tm.assert_series_equal(result, expected)
889
+
890
+
891
def test_nunique_preserves_column_level_names():
    # GH 23222 — the column Index name ("level_0") must survive nunique
    df = DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0"))
    result = df.groupby([0, 0, 0]).nunique()
    expected = DataFrame([2], index=np.array([0]), columns=df.columns)
    tm.assert_frame_equal(result, expected)
897
+
898
+
899
def test_nunique_transform_with_datetime():
    # GH 35109 — transform("nunique") on datetimes yields integer counts
    df = DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"])
    result = df.groupby([0, 0, 1])["date"].transform("nunique")
    tm.assert_series_equal(result, Series([2, 2, 1], name="date"))
905
+
906
+
907
+ def test_empty_categorical(observed):
908
+ # GH#21334
909
+ cat = Series([1]).astype("category")
910
+ ser = cat[:0]
911
+ gb = ser.groupby(ser, observed=observed)
912
+ result = gb.nunique()
913
+ if observed:
914
+ expected = Series([], index=cat[:0], dtype="int64")
915
+ else:
916
+ expected = Series([0], index=cat, dtype="int64")
917
+ tm.assert_series_equal(result, expected)
918
+
919
+
920
def test_intercept_builtin_sum():
    # GH#53425 — builtins.sum handed to agg/apply is intercepted and
    # routed to the groupby sum, emitting a deprecation warning.
    ser = Series([1.0, 2.0, np.nan, 3.0])
    grouped = ser.groupby([0, 1, 2, 2])

    with tm.assert_produces_warning(FutureWarning, match="using SeriesGroupBy.sum"):
        result = grouped.agg(builtins.sum)
    with tm.assert_produces_warning(FutureWarning, match="using np.sum"):
        result2 = grouped.apply(builtins.sum)

    expected = grouped.sum()
    tm.assert_series_equal(result, expected)
    tm.assert_series_equal(result2, expected)
935
+
936
+
937
@pytest.mark.parametrize("min_count", [0, 10])
def test_groupby_sum_mincount_boolean(min_count):
    # masked boolean sum: min_count=0 fills all-NA groups with 0,
    # while a min_count above every group size masks everything.
    truthy = True
    falsy = False
    missing = np.nan
    values = pd.array(
        [truthy, truthy, missing, missing, falsy, falsy, truthy], dtype="boolean"
    )

    df = DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": values})
    result = df.groupby("A").sum(min_count=min_count)
    if min_count == 0:
        expected = DataFrame(
            {"B": pd.array([3, 0, 0], dtype="Int64")},
            index=pd.Index([1, 2, 3], name="A"),
        )
    else:
        expected = DataFrame(
            {"B": pd.array([pd.NA] * 3, dtype="Int64")},
            index=pd.Index([1, 2, 3], name="A"),
        )
    tm.assert_frame_equal(result, expected)
958
+
959
+
960
def test_groupby_sum_below_mincount_nullable_integer():
    # https://github.com/pandas-dev/pandas/issues/32861
    df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64")
    grouped = df.groupby("a")
    idx = pd.Index([0, 1, 2], name="a", dtype="Int64")

    # every group holds a single row, so min_count=2 masks every result
    result = grouped["b"].sum(min_count=2)
    expected_ser = Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")
    tm.assert_series_equal(result, expected_ser)

    result = grouped.sum(min_count=2)
    expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx)
    tm.assert_frame_equal(result, expected)
973
+
974
+
975
def test_groupby_sum_timedelta_with_nat():
    # GH#42659 — NaT is skipped by default but honoured through min_count
    df = DataFrame(
        {
            "a": [1, 1, 2, 2],
            "b": [pd.Timedelta("1d"), pd.Timedelta("2d"), pd.Timedelta("3d"), pd.NaT],
        }
    )
    three_days = pd.Timedelta(days=3)
    gb = df.groupby("a")

    expected = DataFrame(
        {"b": [three_days, three_days]}, index=pd.Index([1, 2], name="a")
    )
    tm.assert_frame_equal(gb.sum(), expected)

    tm.assert_series_equal(gb["b"].sum(), expected["b"])

    # group 2 has a single non-NaT value, so min_count=2 yields NaT there
    result = gb["b"].sum(min_count=2)
    expected_ser = Series(
        [three_days, pd.NaT], dtype="m8[ns]", name="b", index=expected.index
    )
    tm.assert_series_equal(result, expected_ser)
997
+
998
+
999
@pytest.mark.parametrize(
    "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
    "method,data",
    [
        ("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
        ("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
        ("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
        ("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
        ("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
    ],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
    # GH9311, GH6620 — selection-style aggregations preserve the input
    # dtype; count is the exception and always returns int64.
    df = DataFrame(
        [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
    )
    df["b"] = df.b.astype(dtype)

    data.setdefault("args", [])
    out_type = data.get("out_type", dtype)

    expected = DataFrame(data["df"])
    expected["b"] = expected.b.astype(out_type)
    expected = expected.set_index("a")

    result = getattr(df.groupby("a"), method)(*data["args"])
    tm.assert_frame_equal(result, expected)
1037
+
1038
+
1039
def scipy_sem(*args, **kwargs):
    """Standard error of the mean via scipy, pinned to ddof=1."""
    # local import: keeps scipy optional for the rest of the module
    from scipy.stats import sem

    return sem(*args, ddof=1, **kwargs)
1043
+
1044
+
1045
@pytest.mark.parametrize(
    "op,targop",
    [
        ("mean", np.mean),
        ("median", np.median),
        ("std", np.std),
        ("var", np.var),
        ("sum", np.sum),
        ("prod", np.prod),
        ("min", np.min),
        ("max", np.max),
        ("first", lambda x: x.iloc[0]),
        ("last", lambda x: x.iloc[-1]),
        ("count", np.size),
        pytest.param("sem", scipy_sem, marks=td.skip_if_no("scipy")),
    ],
)
def test_ops_general(op, targop):
    # every cythonized groupby op must match aggregating with the
    # equivalent numpy/positional function
    df = DataFrame(np.random.default_rng(2).standard_normal(1000))
    labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float)

    result = getattr(df.groupby(labels), op)()
    # handing the raw function to agg emits a deprecation warning for
    # the ops that have a named groupby equivalent
    warn = None if op in ("first", "last", "count", "sem") else FutureWarning
    with tm.assert_produces_warning(warn, match=f"using DataFrameGroupBy.{op}"):
        expected = df.groupby(labels).agg(targop)
    tm.assert_frame_equal(result, expected)
1072
+
1073
+
1074
@pytest.mark.parametrize(
    "values",
    [
        {
            "a": [1, 1, 1, 2, 2, 2, 3, 3, 3],
            "b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],
        },
        {"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},
    ],
)
@pytest.mark.parametrize("function", ["mean", "median", "var"])
def test_apply_to_nullable_integer_returns_float(values, function):
    # https://github.com/pandas-dev/pandas/issues/32219
    expected_value = 0.5 if function == "var" else 1.5
    idx = pd.Index([1, 2, 3], name="a", dtype="Int64")
    expected = DataFrame(
        {"b": np.array([expected_value] * 3, dtype=float)}, index=idx
    ).astype("Float64")

    groups = DataFrame(values, dtype="Int64").groupby("a")

    # direct method call and agg-by-name agree
    tm.assert_frame_equal(getattr(groups, function)(), expected)
    tm.assert_frame_equal(groups.agg(function), expected)

    # agg with a list wraps the columns in a MultiIndex
    result = groups.agg([function])
    expected.columns = MultiIndex.from_tuples([("b", function)])
    tm.assert_frame_equal(result, expected)
1103
+
1104
+
1105
@pytest.mark.parametrize(
    "op",
    ["sum", "prod", "min", "max", "median", "mean", "skew", "std", "var", "sem"],
)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_regression_allowlist_methods(op, axis, skipna, sort):
    # GH6944 / GH 17537 — explicitly exercise the allowlisted reductions
    raw_frame = DataFrame([0])
    if axis == 0:
        frame = raw_frame
        msg = "The 'axis' keyword in DataFrame.groupby is deprecated and will be"
    else:
        frame = raw_frame.T
        msg = "DataFrame.groupby with axis=1 is deprecated"

    with tm.assert_produces_warning(FutureWarning, match=msg):
        grouped = frame.groupby(level=0, axis=axis, sort=sort)

    if op == "skew":
        # skew is the only op here taking a skipna argument
        result = getattr(grouped, op)(skipna=skipna)
        expected = frame.groupby(level=0).apply(
            lambda h: getattr(h, op)(axis=axis, skipna=skipna)
        )
    else:
        result = getattr(grouped, op)()
        expected = frame.groupby(level=0).apply(lambda h: getattr(h, op)(axis=axis))
    if sort:
        expected = expected.sort_index(axis=axis)
    tm.assert_frame_equal(result, expected)
1153
+
1154
+
1155
def test_groupby_prod_with_int64_dtype():
    # GH#46573 — int64 prod must stay exact instead of routing through a
    # lossy float intermediate for values that fit in int64
    b_values = [11, 41, 17, 37, 7, 29, 31, 2, 3, 43, 5, 47, 19, 88]
    df = DataFrame(
        {"A": [1] * len(b_values), "B": b_values}, dtype="int64"
    )
    result = df.groupby(["A"]).prod().reset_index()
    expected = DataFrame({"A": [1], "B": [180970905912331920]}, dtype="int64")
    tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/groupby/test_timegrouper.py ADDED
@@ -0,0 +1,963 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ test with the TimeGrouper / grouping with datetimes
3
+ """
4
+ from datetime import (
5
+ datetime,
6
+ timedelta,
7
+ )
8
+
9
+ import numpy as np
10
+ import pytest
11
+ import pytz
12
+
13
+ import pandas as pd
14
+ from pandas import (
15
+ DataFrame,
16
+ DatetimeIndex,
17
+ Index,
18
+ MultiIndex,
19
+ Series,
20
+ Timestamp,
21
+ date_range,
22
+ offsets,
23
+ )
24
+ import pandas._testing as tm
25
+ from pandas.core.groupby.grouper import Grouper
26
+ from pandas.core.groupby.ops import BinGrouper
27
+
28
+
29
@pytest.fixture
def frame_for_truncated_bingrouper():
    """
    DataFrame used by groupby_with_truncated_bingrouper, made into
    a separate fixture for easier reuse in
    test_groupby_apply_timegrouper_with_nat_apply_squeeze
    """
    return DataFrame(
        {
            "Quantity": [18, 3, 5, 1, 9, 3],
            "Date": [
                Timestamp(2013, 9, 1, 13, 0),
                Timestamp(2013, 9, 1, 13, 5),
                Timestamp(2013, 10, 1, 20, 0),
                Timestamp(2013, 10, 3, 10, 0),
                pd.NaT,  # the NaT row produces the truncated-bin case
                Timestamp(2013, 9, 2, 14, 0),
            ],
        }
    )
50
+
51
+
52
@pytest.fixture
def groupby_with_truncated_bingrouper(frame_for_truncated_bingrouper):
    """
    GroupBy object such that gb._grouper is a BinGrouper and
    len(gb._grouper.result_index) < len(gb._grouper.group_keys_seq)

    Aggregations on this groupby should have

        dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date")

    As either the index or an index level.
    """
    gb = frame_for_truncated_bingrouper.groupby(Grouper(key="Date", freq="5D"))

    # sanity check: this really is the truncated-BinGrouper situation
    assert len(gb._grouper.result_index) != len(gb._grouper.group_keys_seq)

    return gb
73
+
74
+
75
+ class TestGroupBy:
76
+ def test_groupby_with_timegrouper(self):
77
+ # GH 4161
78
+ # TimeGrouper requires a sorted index
79
+ # also verifies that the resultant index has the correct name
80
+ df_original = DataFrame(
81
+ {
82
+ "Buyer": "Carl Carl Carl Carl Joe Carl".split(),
83
+ "Quantity": [18, 3, 5, 1, 9, 3],
84
+ "Date": [
85
+ datetime(2013, 9, 1, 13, 0),
86
+ datetime(2013, 9, 1, 13, 5),
87
+ datetime(2013, 10, 1, 20, 0),
88
+ datetime(2013, 10, 3, 10, 0),
89
+ datetime(2013, 12, 2, 12, 0),
90
+ datetime(2013, 9, 2, 14, 0),
91
+ ],
92
+ }
93
+ )
94
+
95
+ # GH 6908 change target column's order
96
+ df_reordered = df_original.sort_values(by="Quantity")
97
+
98
+ for df in [df_original, df_reordered]:
99
+ df = df.set_index(["Date"])
100
+
101
+ exp_dti = date_range(
102
+ "20130901",
103
+ "20131205",
104
+ freq="5D",
105
+ name="Date",
106
+ inclusive="left",
107
+ unit=df.index.unit,
108
+ )
109
+ expected = DataFrame(
110
+ {"Buyer": 0, "Quantity": 0},
111
+ index=exp_dti,
112
+ )
113
+ # Cast to object to avoid implicit cast when setting entry to "CarlCarlCarl"
114
+ expected = expected.astype({"Buyer": object})
115
+ expected.iloc[0, 0] = "CarlCarlCarl"
116
+ expected.iloc[6, 0] = "CarlCarl"
117
+ expected.iloc[18, 0] = "Joe"
118
+ expected.iloc[[0, 6, 18], 1] = np.array([24, 6, 9], dtype="int64")
119
+
120
+ result1 = df.resample("5D").sum()
121
+ tm.assert_frame_equal(result1, expected)
122
+
123
+ df_sorted = df.sort_index()
124
+ result2 = df_sorted.groupby(Grouper(freq="5D")).sum()
125
+ tm.assert_frame_equal(result2, expected)
126
+
127
+ result3 = df.groupby(Grouper(freq="5D")).sum()
128
+ tm.assert_frame_equal(result3, expected)
129
+
130
+ @pytest.mark.parametrize("should_sort", [True, False])
131
+ def test_groupby_with_timegrouper_methods(self, should_sort):
132
+ # GH 3881
133
+ # make sure API of timegrouper conforms
134
+
135
+ df = DataFrame(
136
+ {
137
+ "Branch": "A A A A A B".split(),
138
+ "Buyer": "Carl Mark Carl Joe Joe Carl".split(),
139
+ "Quantity": [1, 3, 5, 8, 9, 3],
140
+ "Date": [
141
+ datetime(2013, 1, 1, 13, 0),
142
+ datetime(2013, 1, 1, 13, 5),
143
+ datetime(2013, 10, 1, 20, 0),
144
+ datetime(2013, 10, 2, 10, 0),
145
+ datetime(2013, 12, 2, 12, 0),
146
+ datetime(2013, 12, 2, 14, 0),
147
+ ],
148
+ }
149
+ )
150
+
151
+ if should_sort:
152
+ df = df.sort_values(by="Quantity", ascending=False)
153
+
154
+ df = df.set_index("Date", drop=False)
155
+ g = df.groupby(Grouper(freq="6ME"))
156
+ assert g.group_keys
157
+
158
+ assert isinstance(g._grouper, BinGrouper)
159
+ groups = g.groups
160
+ assert isinstance(groups, dict)
161
+ assert len(groups) == 3
162
+
163
+ def test_timegrouper_with_reg_groups(self):
164
+ # GH 3794
165
+ # allow combination of timegrouper/reg groups
166
+
167
+ df_original = DataFrame(
168
+ {
169
+ "Branch": "A A A A A A A B".split(),
170
+ "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),
171
+ "Quantity": [1, 3, 5, 1, 8, 1, 9, 3],
172
+ "Date": [
173
+ datetime(2013, 1, 1, 13, 0),
174
+ datetime(2013, 1, 1, 13, 5),
175
+ datetime(2013, 10, 1, 20, 0),
176
+ datetime(2013, 10, 2, 10, 0),
177
+ datetime(2013, 10, 1, 20, 0),
178
+ datetime(2013, 10, 2, 10, 0),
179
+ datetime(2013, 12, 2, 12, 0),
180
+ datetime(2013, 12, 2, 14, 0),
181
+ ],
182
+ }
183
+ ).set_index("Date")
184
+
185
+ df_sorted = df_original.sort_values(by="Quantity", ascending=False)
186
+
187
+ for df in [df_original, df_sorted]:
188
+ expected = DataFrame(
189
+ {
190
+ "Buyer": "Carl Joe Mark".split(),
191
+ "Quantity": [10, 18, 3],
192
+ "Date": [
193
+ datetime(2013, 12, 31, 0, 0),
194
+ datetime(2013, 12, 31, 0, 0),
195
+ datetime(2013, 12, 31, 0, 0),
196
+ ],
197
+ }
198
+ ).set_index(["Date", "Buyer"])
199
+
200
+ msg = "The default value of numeric_only"
201
+ result = df.groupby([Grouper(freq="YE"), "Buyer"]).sum(numeric_only=True)
202
+ tm.assert_frame_equal(result, expected)
203
+
204
+ expected = DataFrame(
205
+ {
206
+ "Buyer": "Carl Mark Carl Joe".split(),
207
+ "Quantity": [1, 3, 9, 18],
208
+ "Date": [
209
+ datetime(2013, 1, 1, 0, 0),
210
+ datetime(2013, 1, 1, 0, 0),
211
+ datetime(2013, 7, 1, 0, 0),
212
+ datetime(2013, 7, 1, 0, 0),
213
+ ],
214
+ }
215
+ ).set_index(["Date", "Buyer"])
216
+ result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum(numeric_only=True)
217
+ tm.assert_frame_equal(result, expected)
218
+
219
+ df_original = DataFrame(
220
+ {
221
+ "Branch": "A A A A A A A B".split(),
222
+ "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),
223
+ "Quantity": [1, 3, 5, 1, 8, 1, 9, 3],
224
+ "Date": [
225
+ datetime(2013, 10, 1, 13, 0),
226
+ datetime(2013, 10, 1, 13, 5),
227
+ datetime(2013, 10, 1, 20, 0),
228
+ datetime(2013, 10, 2, 10, 0),
229
+ datetime(2013, 10, 1, 20, 0),
230
+ datetime(2013, 10, 2, 10, 0),
231
+ datetime(2013, 10, 2, 12, 0),
232
+ datetime(2013, 10, 2, 14, 0),
233
+ ],
234
+ }
235
+ ).set_index("Date")
236
+
237
+ df_sorted = df_original.sort_values(by="Quantity", ascending=False)
238
+ for df in [df_original, df_sorted]:
239
+ expected = DataFrame(
240
+ {
241
+ "Buyer": "Carl Joe Mark Carl Joe".split(),
242
+ "Quantity": [6, 8, 3, 4, 10],
243
+ "Date": [
244
+ datetime(2013, 10, 1, 0, 0),
245
+ datetime(2013, 10, 1, 0, 0),
246
+ datetime(2013, 10, 1, 0, 0),
247
+ datetime(2013, 10, 2, 0, 0),
248
+ datetime(2013, 10, 2, 0, 0),
249
+ ],
250
+ }
251
+ ).set_index(["Date", "Buyer"])
252
+
253
+ result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum(numeric_only=True)
254
+ tm.assert_frame_equal(result, expected)
255
+
256
+ result = df.groupby([Grouper(freq="1ME"), "Buyer"]).sum(numeric_only=True)
257
+ expected = DataFrame(
258
+ {
259
+ "Buyer": "Carl Joe Mark".split(),
260
+ "Quantity": [10, 18, 3],
261
+ "Date": [
262
+ datetime(2013, 10, 31, 0, 0),
263
+ datetime(2013, 10, 31, 0, 0),
264
+ datetime(2013, 10, 31, 0, 0),
265
+ ],
266
+ }
267
+ ).set_index(["Date", "Buyer"])
268
+ tm.assert_frame_equal(result, expected)
269
+
270
+ # passing the name
271
+ df = df.reset_index()
272
+ result = df.groupby([Grouper(freq="1ME", key="Date"), "Buyer"]).sum(
273
+ numeric_only=True
274
+ )
275
+ tm.assert_frame_equal(result, expected)
276
+
277
+ with pytest.raises(KeyError, match="'The grouper name foo is not found'"):
278
+ df.groupby([Grouper(freq="1ME", key="foo"), "Buyer"]).sum()
279
+
280
+ # passing the level
281
+ df = df.set_index("Date")
282
+ result = df.groupby([Grouper(freq="1ME", level="Date"), "Buyer"]).sum(
283
+ numeric_only=True
284
+ )
285
+ tm.assert_frame_equal(result, expected)
286
+ result = df.groupby([Grouper(freq="1ME", level=0), "Buyer"]).sum(
287
+ numeric_only=True
288
+ )
289
+ tm.assert_frame_equal(result, expected)
290
+
291
+ with pytest.raises(ValueError, match="The level foo is not valid"):
292
+ df.groupby([Grouper(freq="1ME", level="foo"), "Buyer"]).sum()
293
+
294
+ # multi names
295
+ df = df.copy()
296
+ df["Date"] = df.index + offsets.MonthEnd(2)
297
+ result = df.groupby([Grouper(freq="1ME", key="Date"), "Buyer"]).sum(
298
+ numeric_only=True
299
+ )
300
+ expected = DataFrame(
301
+ {
302
+ "Buyer": "Carl Joe Mark".split(),
303
+ "Quantity": [10, 18, 3],
304
+ "Date": [
305
+ datetime(2013, 11, 30, 0, 0),
306
+ datetime(2013, 11, 30, 0, 0),
307
+ datetime(2013, 11, 30, 0, 0),
308
+ ],
309
+ }
310
+ ).set_index(["Date", "Buyer"])
311
+ tm.assert_frame_equal(result, expected)
312
+
313
+ # error as we have both a level and a name!
314
+ msg = "The Grouper cannot specify both a key and a level!"
315
+ with pytest.raises(ValueError, match=msg):
316
+ df.groupby(
317
+ [Grouper(freq="1ME", key="Date", level="Date"), "Buyer"]
318
+ ).sum()
319
+
320
+ # single groupers
321
+ expected = DataFrame(
322
+ [[31]],
323
+ columns=["Quantity"],
324
+ index=DatetimeIndex(
325
+ [datetime(2013, 10, 31, 0, 0)], freq=offsets.MonthEnd(), name="Date"
326
+ ),
327
+ )
328
+ result = df.groupby(Grouper(freq="1ME")).sum(numeric_only=True)
329
+ tm.assert_frame_equal(result, expected)
330
+
331
+ result = df.groupby([Grouper(freq="1ME")]).sum(numeric_only=True)
332
+ tm.assert_frame_equal(result, expected)
333
+
334
+ expected.index = expected.index.shift(1)
335
+ assert expected.index.freq == offsets.MonthEnd()
336
+ result = df.groupby(Grouper(freq="1ME", key="Date")).sum(numeric_only=True)
337
+ tm.assert_frame_equal(result, expected)
338
+
339
+ result = df.groupby([Grouper(freq="1ME", key="Date")]).sum(
340
+ numeric_only=True
341
+ )
342
+ tm.assert_frame_equal(result, expected)
343
+
344
+ @pytest.mark.parametrize("freq", ["D", "ME", "YE", "QE-APR"])
345
+ def test_timegrouper_with_reg_groups_freq(self, freq):
346
+ # GH 6764 multiple grouping with/without sort
347
+ df = DataFrame(
348
+ {
349
+ "date": pd.to_datetime(
350
+ [
351
+ "20121002",
352
+ "20121007",
353
+ "20130130",
354
+ "20130202",
355
+ "20130305",
356
+ "20121002",
357
+ "20121207",
358
+ "20130130",
359
+ "20130202",
360
+ "20130305",
361
+ "20130202",
362
+ "20130305",
363
+ ]
364
+ ),
365
+ "user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
366
+ "whole_cost": [
367
+ 1790,
368
+ 364,
369
+ 280,
370
+ 259,
371
+ 201,
372
+ 623,
373
+ 90,
374
+ 312,
375
+ 359,
376
+ 301,
377
+ 359,
378
+ 801,
379
+ ],
380
+ "cost1": [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12],
381
+ }
382
+ ).set_index("date")
383
+
384
+ expected = (
385
+ df.groupby("user_id")["whole_cost"]
386
+ .resample(freq)
387
+ .sum(min_count=1) # XXX
388
+ .dropna()
389
+ .reorder_levels(["date", "user_id"])
390
+ .sort_index()
391
+ .astype("int64")
392
+ )
393
+ expected.name = "whole_cost"
394
+
395
+ result1 = (
396
+ df.sort_index().groupby([Grouper(freq=freq), "user_id"])["whole_cost"].sum()
397
+ )
398
+ tm.assert_series_equal(result1, expected)
399
+
400
+ result2 = df.groupby([Grouper(freq=freq), "user_id"])["whole_cost"].sum()
401
+ tm.assert_series_equal(result2, expected)
402
+
403
+ def test_timegrouper_get_group(self):
404
+ # GH 6914
405
+
406
+ df_original = DataFrame(
407
+ {
408
+ "Buyer": "Carl Joe Joe Carl Joe Carl".split(),
409
+ "Quantity": [18, 3, 5, 1, 9, 3],
410
+ "Date": [
411
+ datetime(2013, 9, 1, 13, 0),
412
+ datetime(2013, 9, 1, 13, 5),
413
+ datetime(2013, 10, 1, 20, 0),
414
+ datetime(2013, 10, 3, 10, 0),
415
+ datetime(2013, 12, 2, 12, 0),
416
+ datetime(2013, 9, 2, 14, 0),
417
+ ],
418
+ }
419
+ )
420
+ df_reordered = df_original.sort_values(by="Quantity")
421
+
422
+ # single grouping
423
+ expected_list = [
424
+ df_original.iloc[[0, 1, 5]],
425
+ df_original.iloc[[2, 3]],
426
+ df_original.iloc[[4]],
427
+ ]
428
+ dt_list = ["2013-09-30", "2013-10-31", "2013-12-31"]
429
+
430
+ for df in [df_original, df_reordered]:
431
+ grouped = df.groupby(Grouper(freq="ME", key="Date"))
432
+ for t, expected in zip(dt_list, expected_list):
433
+ dt = Timestamp(t)
434
+ result = grouped.get_group(dt)
435
+ tm.assert_frame_equal(result, expected)
436
+
437
+ # multiple grouping
438
+ expected_list = [
439
+ df_original.iloc[[1]],
440
+ df_original.iloc[[3]],
441
+ df_original.iloc[[4]],
442
+ ]
443
+ g_list = [("Joe", "2013-09-30"), ("Carl", "2013-10-31"), ("Joe", "2013-12-31")]
444
+
445
+ for df in [df_original, df_reordered]:
446
+ grouped = df.groupby(["Buyer", Grouper(freq="ME", key="Date")])
447
+ for (b, t), expected in zip(g_list, expected_list):
448
+ dt = Timestamp(t)
449
+ result = grouped.get_group((b, dt))
450
+ tm.assert_frame_equal(result, expected)
451
+
452
+ # with index
453
+ df_original = df_original.set_index("Date")
454
+ df_reordered = df_original.sort_values(by="Quantity")
455
+
456
+ expected_list = [
457
+ df_original.iloc[[0, 1, 5]],
458
+ df_original.iloc[[2, 3]],
459
+ df_original.iloc[[4]],
460
+ ]
461
+
462
+ for df in [df_original, df_reordered]:
463
+ grouped = df.groupby(Grouper(freq="ME"))
464
+ for t, expected in zip(dt_list, expected_list):
465
+ dt = Timestamp(t)
466
+ result = grouped.get_group(dt)
467
+ tm.assert_frame_equal(result, expected)
468
+
469
def test_timegrouper_apply_return_type_series(self):
    # Issue #11742 — apply with a TimeGrouper should return the same
    # type as apply with a plain Grouper.
    df = DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]})
    df_dt = df.copy()
    df_dt["date"] = pd.to_datetime(df_dt["date"])

    def sumfunc_series(x):
        return Series([x["value"].sum()], ("sum",))

    msg = "DataFrameGroupBy.apply operated on the grouping columns"
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        expected = df.groupby(Grouper(key="date")).apply(sumfunc_series)
    with tm.assert_produces_warning(DeprecationWarning, match=msg):
        result = df_dt.groupby(Grouper(freq="ME", key="date")).apply(sumfunc_series)
    tm.assert_frame_equal(
        result.reset_index(drop=True), expected.reset_index(drop=True)
    )
489
+
490
+ def test_timegrouper_apply_return_type_value(self):
491
+ # Using `apply` with the `TimeGrouper` should give the
492
+ # same return type as an `apply` with a `Grouper`.
493
+ # Issue #11742
494
+ df = DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]})
495
+ df_dt = df.copy()
496
+ df_dt["date"] = pd.to_datetime(df_dt["date"])
497
+
498
+ def sumfunc_value(x):
499
+ return x.value.sum()
500
+
501
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
502
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
503
+ expected = df.groupby(Grouper(key="date")).apply(sumfunc_value)
504
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
505
+ result = df_dt.groupby(Grouper(freq="ME", key="date")).apply(sumfunc_value)
506
+ tm.assert_series_equal(
507
+ result.reset_index(drop=True), expected.reset_index(drop=True)
508
+ )
509
+
510
+ def test_groupby_groups_datetimeindex(self):
511
+ # GH#1430
512
+ periods = 1000
513
+ ind = date_range(start="2012/1/1", freq="5min", periods=periods)
514
+ df = DataFrame(
515
+ {"high": np.arange(periods), "low": np.arange(periods)}, index=ind
516
+ )
517
+ grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
518
+
519
+ # it works!
520
+ groups = grouped.groups
521
+ assert isinstance(next(iter(groups.keys())), datetime)
522
+
523
+ def test_groupby_groups_datetimeindex2(self):
524
+ # GH#11442
525
+ index = date_range("2015/01/01", periods=5, name="date")
526
+ df = DataFrame({"A": [5, 6, 7, 8, 9], "B": [1, 2, 3, 4, 5]}, index=index)
527
+ result = df.groupby(level="date").groups
528
+ dates = ["2015-01-05", "2015-01-04", "2015-01-03", "2015-01-02", "2015-01-01"]
529
+ expected = {
530
+ Timestamp(date): DatetimeIndex([date], name="date") for date in dates
531
+ }
532
+ tm.assert_dict_equal(result, expected)
533
+
534
+ grouped = df.groupby(level="date")
535
+ for date in dates:
536
+ result = grouped.get_group(date)
537
+ data = [[df.loc[date, "A"], df.loc[date, "B"]]]
538
+ expected_index = DatetimeIndex(
539
+ [date], name="date", freq="D", dtype=index.dtype
540
+ )
541
+ expected = DataFrame(data, columns=list("AB"), index=expected_index)
542
+ tm.assert_frame_equal(result, expected)
543
+
544
    def test_groupby_groups_datetimeindex_tz(self):
        # GH 3950: grouping by a tz-aware datetime column (or index level)
        # must preserve the timezone in the result index.
        dates = [
            "2011-07-19 07:00:00",
            "2011-07-19 08:00:00",
            "2011-07-19 09:00:00",
            "2011-07-19 07:00:00",
            "2011-07-19 08:00:00",
            "2011-07-19 09:00:00",
        ]
        df = DataFrame(
            {
                "label": ["a", "a", "a", "b", "b", "b"],
                "datetime": dates,
                "value1": np.arange(6, dtype="int64"),
                "value2": [1, 2] * 3,
            }
        )
        # Make the grouping column tz-aware (US/Pacific).
        df["datetime"] = df["datetime"].apply(lambda d: Timestamp(d, tz="US/Pacific"))

        # Expected result index: (datetime, label) pairs ordered by datetime,
        # with the timezone carried on the datetime level.
        exp_idx1 = DatetimeIndex(
            [
                "2011-07-19 07:00:00",
                "2011-07-19 07:00:00",
                "2011-07-19 08:00:00",
                "2011-07-19 08:00:00",
                "2011-07-19 09:00:00",
                "2011-07-19 09:00:00",
            ],
            tz="US/Pacific",
            name="datetime",
        )
        exp_idx2 = Index(["a", "b"] * 3, name="label")
        exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
        expected = DataFrame(
            {"value1": [0, 3, 1, 4, 2, 5], "value2": [1, 2, 2, 1, 1, 2]},
            index=exp_idx,
            columns=["value1", "value2"],
        )

        # Group by the tz-aware column together with a plain column.
        result = df.groupby(["datetime", "label"]).sum()
        tm.assert_frame_equal(result, expected)

        # by level: same check with tz-aware datetimes as the index itself
        didx = DatetimeIndex(dates, tz="Asia/Tokyo")
        df = DataFrame(
            {"value1": np.arange(6, dtype="int64"), "value2": [1, 2, 3, 1, 2, 3]},
            index=didx,
        )

        exp_idx = DatetimeIndex(
            ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"],
            tz="Asia/Tokyo",
        )
        expected = DataFrame(
            {"value1": [3, 5, 7], "value2": [2, 4, 6]},
            index=exp_idx,
            columns=["value1", "value2"],
        )

        result = df.groupby(level=0).sum()
        tm.assert_frame_equal(result, expected)
606
+
607
+ def test_frame_datetime64_handling_groupby(self):
608
+ # it works!
609
+ df = DataFrame(
610
+ [(3, np.datetime64("2012-07-03")), (3, np.datetime64("2012-07-04"))],
611
+ columns=["a", "date"],
612
+ )
613
+ result = df.groupby("a").first()
614
+ assert result["date"][3] == Timestamp("2012-07-03")
615
+
616
    def test_groupby_multi_timezone(self):
        # combining multiple / different timezones yields UTC
        # (a mixed-tz apply result comes back as an object-dtype Series of
        # tz-aware Timestamps, not a single datetime64 dtype)
        df = DataFrame(
            {
                "value": range(5),
                "date": [
                    "2000-01-28 16:47:00",
                    "2000-01-29 16:48:00",
                    "2000-01-30 16:49:00",
                    "2000-01-31 16:50:00",
                    "2000-01-01 16:50:00",
                ],
                "tz": [
                    "America/Chicago",
                    "America/Chicago",
                    "America/Los_Angeles",
                    "America/Chicago",
                    "America/New_York",
                ],
            }
        )

        # Localize each row's naive date string using that row's own "tz"
        # value (the group name).
        result = df.groupby("tz", group_keys=False).date.apply(
            lambda x: pd.to_datetime(x).dt.tz_localize(x.name)
        )

        expected = Series(
            [
                Timestamp("2000-01-28 16:47:00-0600", tz="America/Chicago"),
                Timestamp("2000-01-29 16:48:00-0600", tz="America/Chicago"),
                Timestamp("2000-01-30 16:49:00-0800", tz="America/Los_Angeles"),
                Timestamp("2000-01-31 16:50:00-0600", tz="America/Chicago"),
                Timestamp("2000-01-01 16:50:00-0500", tz="America/New_York"),
            ],
            name="date",
            dtype=object,  # mixed timezones cannot share one datetime64 dtype
        )
        tm.assert_series_equal(result, expected)

        # get_group keeps the original row labels (positions 0, 1, 3).
        tz = "America/Chicago"
        res_values = df.groupby("tz").date.get_group(tz)
        result = pd.to_datetime(res_values).dt.tz_localize(tz)
        exp_values = Series(
            ["2000-01-28 16:47:00", "2000-01-29 16:48:00", "2000-01-31 16:50:00"],
            index=[0, 1, 3],
            name="date",
        )
        expected = pd.to_datetime(exp_values).dt.tz_localize(tz)
        tm.assert_series_equal(result, expected)
665
+
666
    def test_groupby_groups_periods(self):
        # Grouping by a Period column / PeriodIndex level must keep the
        # period dtype and frequency in the result index.
        dates = [
            "2011-07-19 07:00:00",
            "2011-07-19 08:00:00",
            "2011-07-19 09:00:00",
            "2011-07-19 07:00:00",
            "2011-07-19 08:00:00",
            "2011-07-19 09:00:00",
        ]
        df = DataFrame(
            {
                "label": ["a", "a", "a", "b", "b", "b"],
                "period": [pd.Period(d, freq="h") for d in dates],
                "value1": np.arange(6, dtype="int64"),
                "value2": [1, 2] * 3,
            }
        )

        # Expected index: (period, label) pairs ordered by period, period
        # level retaining the hourly frequency.
        exp_idx1 = pd.PeriodIndex(
            [
                "2011-07-19 07:00:00",
                "2011-07-19 07:00:00",
                "2011-07-19 08:00:00",
                "2011-07-19 08:00:00",
                "2011-07-19 09:00:00",
                "2011-07-19 09:00:00",
            ],
            freq="h",
            name="period",
        )
        exp_idx2 = Index(["a", "b"] * 3, name="label")
        exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
        expected = DataFrame(
            {"value1": [0, 3, 1, 4, 2, 5], "value2": [1, 2, 2, 1, 1, 2]},
            index=exp_idx,
            columns=["value1", "value2"],
        )

        result = df.groupby(["period", "label"]).sum()
        tm.assert_frame_equal(result, expected)

        # by level: same check with the periods as the index itself
        didx = pd.PeriodIndex(dates, freq="h")
        df = DataFrame(
            {"value1": np.arange(6, dtype="int64"), "value2": [1, 2, 3, 1, 2, 3]},
            index=didx,
        )

        exp_idx = pd.PeriodIndex(
            ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"],
            freq="h",
        )
        expected = DataFrame(
            {"value1": [3, 5, 7], "value2": [2, 4, 6]},
            index=exp_idx,
            columns=["value1", "value2"],
        )

        result = df.groupby(level=0).sum()
        tm.assert_frame_equal(result, expected)
726
+
727
+ def test_groupby_first_datetime64(self):
728
+ df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])
729
+ df[1] = df[1].astype("M8[ns]")
730
+
731
+ assert issubclass(df[1].dtype.type, np.datetime64)
732
+
733
+ result = df.groupby(level=0).first()
734
+ got_dt = result[1].dtype
735
+ assert issubclass(got_dt.type, np.datetime64)
736
+
737
+ result = df[1].groupby(level=0).first()
738
+ got_dt = result.dtype
739
+ assert issubclass(got_dt.type, np.datetime64)
740
+
741
+ def test_groupby_max_datetime64(self):
742
+ # GH 5869
743
+ # datetimelike dtype conversion from int
744
+ df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})
745
+ # TODO: can we retain second reso in .apply here?
746
+ expected = df.groupby("A")["A"].apply(lambda x: x.max()).astype("M8[s]")
747
+ result = df.groupby("A")["A"].max()
748
+ tm.assert_series_equal(result, expected)
749
+
750
+ def test_groupby_datetime64_32_bit(self):
751
+ # GH 6410 / numpy 4328
752
+ # 32-bit under 1.9-dev indexing issue
753
+
754
+ df = DataFrame({"A": range(2), "B": [Timestamp("2000-01-1")] * 2})
755
+ result = df.groupby("A")["B"].transform("min")
756
+ expected = Series([Timestamp("2000-01-1")] * 2, name="B")
757
+ tm.assert_series_equal(result, expected)
758
+
759
+ def test_groupby_with_timezone_selection(self):
760
+ # GH 11616
761
+ # Test that column selection returns output in correct timezone.
762
+
763
+ df = DataFrame(
764
+ {
765
+ "factor": np.random.default_rng(2).integers(0, 3, size=60),
766
+ "time": date_range("01/01/2000 00:00", periods=60, freq="s", tz="UTC"),
767
+ }
768
+ )
769
+ df1 = df.groupby("factor").max()["time"]
770
+ df2 = df.groupby("factor")["time"].max()
771
+ tm.assert_series_equal(df1, df2)
772
+
773
    def test_timezone_info(self):
        # see gh-11682: Timezone info lost when broadcasting
        # scalar datetime to DataFrame

        # tz-aware scalar inside a list survives DataFrame construction
        df = DataFrame({"a": [1], "b": [datetime.now(pytz.utc)]})
        assert df["b"][0].tzinfo == pytz.utc
        # ...and broadcasting a tz-aware scalar to a whole new column keeps it
        df = DataFrame({"a": [1, 2, 3]})
        df["b"] = datetime.now(pytz.utc)
        assert df["b"][0].tzinfo == pytz.utc
782
+
783
+ def test_datetime_count(self):
784
+ df = DataFrame(
785
+ {"a": [1, 2, 3] * 2, "dates": date_range("now", periods=6, freq="min")}
786
+ )
787
+ result = df.groupby("a").dates.count()
788
+ expected = Series([2, 2, 2], index=Index([1, 2, 3], name="a"), name="dates")
789
+ tm.assert_series_equal(result, expected)
790
+
791
+ def test_first_last_max_min_on_time_data(self):
792
+ # GH 10295
793
+ # Verify that NaT is not in the result of max, min, first and last on
794
+ # Dataframe with datetime or timedelta values.
795
+ df_test = DataFrame(
796
+ {
797
+ "dt": [
798
+ np.nan,
799
+ "2015-07-24 10:10",
800
+ "2015-07-25 11:11",
801
+ "2015-07-23 12:12",
802
+ np.nan,
803
+ ],
804
+ "td": [
805
+ np.nan,
806
+ timedelta(days=1),
807
+ timedelta(days=2),
808
+ timedelta(days=3),
809
+ np.nan,
810
+ ],
811
+ }
812
+ )
813
+ df_test.dt = pd.to_datetime(df_test.dt)
814
+ df_test["group"] = "A"
815
+ df_ref = df_test[df_test.dt.notna()]
816
+
817
+ grouped_test = df_test.groupby("group")
818
+ grouped_ref = df_ref.groupby("group")
819
+
820
+ tm.assert_frame_equal(grouped_ref.max(), grouped_test.max())
821
+ tm.assert_frame_equal(grouped_ref.min(), grouped_test.min())
822
+ tm.assert_frame_equal(grouped_ref.first(), grouped_test.first())
823
+ tm.assert_frame_equal(grouped_ref.last(), grouped_test.last())
824
+
825
+ def test_nunique_with_timegrouper_and_nat(self):
826
+ # GH 17575
827
+ test = DataFrame(
828
+ {
829
+ "time": [
830
+ Timestamp("2016-06-28 09:35:35"),
831
+ pd.NaT,
832
+ Timestamp("2016-06-28 16:46:28"),
833
+ ],
834
+ "data": ["1", "2", "3"],
835
+ }
836
+ )
837
+
838
+ grouper = Grouper(key="time", freq="h")
839
+ result = test.groupby(grouper)["data"].nunique()
840
+ expected = test[test.time.notnull()].groupby(grouper)["data"].nunique()
841
+ expected.index = expected.index._with_freq(None)
842
+ tm.assert_series_equal(result, expected)
843
+
844
+ def test_scalar_call_versus_list_call(self):
845
+ # Issue: 17530
846
+ data_frame = {
847
+ "location": ["shanghai", "beijing", "shanghai"],
848
+ "time": Series(
849
+ ["2017-08-09 13:32:23", "2017-08-11 23:23:15", "2017-08-11 22:23:15"],
850
+ dtype="datetime64[ns]",
851
+ ),
852
+ "value": [1, 2, 3],
853
+ }
854
+ data_frame = DataFrame(data_frame).set_index("time")
855
+ grouper = Grouper(freq="D")
856
+
857
+ grouped = data_frame.groupby(grouper)
858
+ result = grouped.count()
859
+ grouped = data_frame.groupby([grouper])
860
+ expected = grouped.count()
861
+
862
+ tm.assert_frame_equal(result, expected)
863
+
864
+ def test_grouper_period_index(self):
865
+ # GH 32108
866
+ periods = 2
867
+ index = pd.period_range(
868
+ start="2018-01", periods=periods, freq="M", name="Month"
869
+ )
870
+ period_series = Series(range(periods), index=index)
871
+ result = period_series.groupby(period_series.index.month).sum()
872
+
873
+ expected = Series(
874
+ range(periods), index=Index(range(1, periods + 1), name=index.name)
875
+ )
876
+ tm.assert_series_equal(result, expected)
877
+
878
+ def test_groupby_apply_timegrouper_with_nat_dict_returns(
879
+ self, groupby_with_truncated_bingrouper
880
+ ):
881
+ # GH#43500 case where gb._grouper.result_index and gb._grouper.group_keys_seq
882
+ # have different lengths that goes through the `isinstance(values[0], dict)`
883
+ # path
884
+ gb = groupby_with_truncated_bingrouper
885
+
886
+ res = gb["Quantity"].apply(lambda x: {"foo": len(x)})
887
+
888
+ df = gb.obj
889
+ unit = df["Date"]._values.unit
890
+ dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date", unit=unit)
891
+ mi = MultiIndex.from_arrays([dti, ["foo"] * len(dti)])
892
+ expected = Series([3, 0, 0, 0, 0, 0, 2], index=mi, name="Quantity")
893
+ tm.assert_series_equal(res, expected)
894
+
895
+ def test_groupby_apply_timegrouper_with_nat_scalar_returns(
896
+ self, groupby_with_truncated_bingrouper
897
+ ):
898
+ # GH#43500 Previously raised ValueError bc used index with incorrect
899
+ # length in wrap_applied_result
900
+ gb = groupby_with_truncated_bingrouper
901
+
902
+ res = gb["Quantity"].apply(lambda x: x.iloc[0] if len(x) else np.nan)
903
+
904
+ df = gb.obj
905
+ unit = df["Date"]._values.unit
906
+ dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date", unit=unit)
907
+ expected = Series(
908
+ [18, np.nan, np.nan, np.nan, np.nan, np.nan, 5],
909
+ index=dti._with_freq(None),
910
+ name="Quantity",
911
+ )
912
+
913
+ tm.assert_series_equal(res, expected)
914
+
915
    def test_groupby_apply_timegrouper_with_nat_apply_squeeze(
        self, frame_for_truncated_bingrouper
    ):
        df = frame_for_truncated_bingrouper

        # We need to create a GroupBy object with only one non-NaT group,
        # so use a huge freq so that all non-NaT dates will be grouped together
        tdg = Grouper(key="Date", freq="100YE")
        gb = df.groupby(tdg)

        # check that we will go through the singular_series path
        # in _wrap_applied_output_series
        assert gb.ngroups == 1
        assert gb._selected_obj._get_axis(gb.axis).nlevels == 1

        # function that returns a Series; applying it still touches the
        # grouping column, hence the deprecation warning
        msg = "DataFrameGroupBy.apply operated on the grouping columns"
        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            res = gb.apply(lambda x: x["Quantity"] * 2)

        # Single-group result: one row labeled with the bin edge, columns
        # taken from the original row labels of the Quantity values.
        dti = Index([Timestamp("2013-12-31")], dtype=df["Date"].dtype, name="Date")
        expected = DataFrame(
            [[36, 6, 6, 10, 2]],
            index=dti,
            columns=Index([0, 1, 5, 2, 3], name="Quantity"),
        )
        tm.assert_frame_equal(res, expected)
942
+
943
    @pytest.mark.single_cpu
    def test_groupby_agg_numba_timegrouper_with_nat(
        self, groupby_with_truncated_bingrouper
    ):
        # Skip entirely when the optional numba engine is not installed.
        pytest.importorskip("numba")

        # See discussion in GH#43487
        gb = groupby_with_truncated_bingrouper

        # numba-engine aggregation must match the builtin "mean" aggregation
        # even when the grouper contains NaT bins.
        result = gb["Quantity"].aggregate(
            lambda values, index: np.nanmean(values), engine="numba"
        )

        expected = gb["Quantity"].aggregate("mean")
        tm.assert_series_equal(result, expected)

        # Same comparison via DataFrame (list-of-columns) selection.
        result_df = gb[["Quantity"]].aggregate(
            lambda values, index: np.nanmean(values), engine="numba"
        )
        expected_df = gb[["Quantity"]].aggregate("mean")
        tm.assert_frame_equal(result_df, expected_df)
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (7.82 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_c_parser_only.cpython-310.pyc ADDED
Binary file (16.7 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_comment.cpython-310.pyc ADDED
Binary file (5.65 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_compression.cpython-310.pyc ADDED
Binary file (6.27 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_concatenate_chunks.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_converters.cpython-310.pyc ADDED
Binary file (6.58 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_encoding.cpython-310.pyc ADDED
Binary file (9.22 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_header.cpython-310.pyc ADDED
Binary file (16.3 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_mangle_dupes.cpython-310.pyc ADDED
Binary file (4.58 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_multi_thread.cpython-310.pyc ADDED
Binary file (4.24 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_na_values.cpython-310.pyc ADDED
Binary file (16.2 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc ADDED
Binary file (9.44 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_parse_dates.cpython-310.pyc ADDED
Binary file (44.8 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_python_parser_only.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_quoting.cpython-310.pyc ADDED
Binary file (4.88 kB). View file
 
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_read_fwf.cpython-310.pyc ADDED
Binary file (24.4 kB). View file