Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/pandas/tests/api/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_api.py +383 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_types.py +62 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/common.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply_relabeling.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_transform.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_invalid_arg.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_numba.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply_relabeling.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_transform.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_str.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/common.py +7 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply.py +1733 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py +113 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_transform.py +264 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_invalid_arg.py +361 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_numba.py +118 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply.py +701 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply_relabeling.py +39 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_transform.py +84 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_str.py +326 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py +209 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py +504 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py +336 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py +130 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_equals.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_indexing.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_is_monotonic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_sort_values.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_value_counts.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_arithmetic.py +56 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_iter.py +76 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_join.py +149 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_ops.py +56 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_pickle.py +45 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py +329 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__init__.py +0 -0
llmeval-env/lib/python3.10/site-packages/pandas/tests/api/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_api.py
ADDED
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
import pandas as pd
|
6 |
+
from pandas import api
|
7 |
+
import pandas._testing as tm
|
8 |
+
from pandas.api import (
|
9 |
+
extensions as api_extensions,
|
10 |
+
indexers as api_indexers,
|
11 |
+
interchange as api_interchange,
|
12 |
+
types as api_types,
|
13 |
+
typing as api_typing,
|
14 |
+
)
|
15 |
+
|
16 |
+
|
17 |
+
class Base:
|
18 |
+
def check(self, namespace, expected, ignored=None):
|
19 |
+
# see which names are in the namespace, minus optional
|
20 |
+
# ignored ones
|
21 |
+
# compare vs the expected
|
22 |
+
|
23 |
+
result = sorted(
|
24 |
+
f for f in dir(namespace) if not f.startswith("__") and f != "annotations"
|
25 |
+
)
|
26 |
+
if ignored is not None:
|
27 |
+
result = sorted(set(result) - set(ignored))
|
28 |
+
|
29 |
+
expected = sorted(expected)
|
30 |
+
tm.assert_almost_equal(result, expected)
|
31 |
+
|
32 |
+
|
33 |
+
class TestPDApi(Base):
|
34 |
+
# these are optionally imported based on testing
|
35 |
+
# & need to be ignored
|
36 |
+
ignored = ["tests", "locale", "conftest", "_version_meson"]
|
37 |
+
|
38 |
+
# top-level sub-packages
|
39 |
+
public_lib = [
|
40 |
+
"api",
|
41 |
+
"arrays",
|
42 |
+
"options",
|
43 |
+
"test",
|
44 |
+
"testing",
|
45 |
+
"errors",
|
46 |
+
"plotting",
|
47 |
+
"io",
|
48 |
+
"tseries",
|
49 |
+
]
|
50 |
+
private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"]
|
51 |
+
|
52 |
+
# misc
|
53 |
+
misc = ["IndexSlice", "NaT", "NA"]
|
54 |
+
|
55 |
+
# top-level classes
|
56 |
+
classes = [
|
57 |
+
"ArrowDtype",
|
58 |
+
"Categorical",
|
59 |
+
"CategoricalIndex",
|
60 |
+
"DataFrame",
|
61 |
+
"DateOffset",
|
62 |
+
"DatetimeIndex",
|
63 |
+
"ExcelFile",
|
64 |
+
"ExcelWriter",
|
65 |
+
"Flags",
|
66 |
+
"Grouper",
|
67 |
+
"HDFStore",
|
68 |
+
"Index",
|
69 |
+
"MultiIndex",
|
70 |
+
"Period",
|
71 |
+
"PeriodIndex",
|
72 |
+
"RangeIndex",
|
73 |
+
"Series",
|
74 |
+
"SparseDtype",
|
75 |
+
"StringDtype",
|
76 |
+
"Timedelta",
|
77 |
+
"TimedeltaIndex",
|
78 |
+
"Timestamp",
|
79 |
+
"Interval",
|
80 |
+
"IntervalIndex",
|
81 |
+
"CategoricalDtype",
|
82 |
+
"PeriodDtype",
|
83 |
+
"IntervalDtype",
|
84 |
+
"DatetimeTZDtype",
|
85 |
+
"BooleanDtype",
|
86 |
+
"Int8Dtype",
|
87 |
+
"Int16Dtype",
|
88 |
+
"Int32Dtype",
|
89 |
+
"Int64Dtype",
|
90 |
+
"UInt8Dtype",
|
91 |
+
"UInt16Dtype",
|
92 |
+
"UInt32Dtype",
|
93 |
+
"UInt64Dtype",
|
94 |
+
"Float32Dtype",
|
95 |
+
"Float64Dtype",
|
96 |
+
"NamedAgg",
|
97 |
+
]
|
98 |
+
|
99 |
+
# these are already deprecated; awaiting removal
|
100 |
+
deprecated_classes: list[str] = []
|
101 |
+
|
102 |
+
# external modules exposed in pandas namespace
|
103 |
+
modules: list[str] = []
|
104 |
+
|
105 |
+
# top-level functions
|
106 |
+
funcs = [
|
107 |
+
"array",
|
108 |
+
"bdate_range",
|
109 |
+
"concat",
|
110 |
+
"crosstab",
|
111 |
+
"cut",
|
112 |
+
"date_range",
|
113 |
+
"interval_range",
|
114 |
+
"eval",
|
115 |
+
"factorize",
|
116 |
+
"get_dummies",
|
117 |
+
"from_dummies",
|
118 |
+
"infer_freq",
|
119 |
+
"isna",
|
120 |
+
"isnull",
|
121 |
+
"lreshape",
|
122 |
+
"melt",
|
123 |
+
"notna",
|
124 |
+
"notnull",
|
125 |
+
"offsets",
|
126 |
+
"merge",
|
127 |
+
"merge_ordered",
|
128 |
+
"merge_asof",
|
129 |
+
"period_range",
|
130 |
+
"pivot",
|
131 |
+
"pivot_table",
|
132 |
+
"qcut",
|
133 |
+
"show_versions",
|
134 |
+
"timedelta_range",
|
135 |
+
"unique",
|
136 |
+
"value_counts",
|
137 |
+
"wide_to_long",
|
138 |
+
]
|
139 |
+
|
140 |
+
# top-level option funcs
|
141 |
+
funcs_option = [
|
142 |
+
"reset_option",
|
143 |
+
"describe_option",
|
144 |
+
"get_option",
|
145 |
+
"option_context",
|
146 |
+
"set_option",
|
147 |
+
"set_eng_float_format",
|
148 |
+
]
|
149 |
+
|
150 |
+
# top-level read_* funcs
|
151 |
+
funcs_read = [
|
152 |
+
"read_clipboard",
|
153 |
+
"read_csv",
|
154 |
+
"read_excel",
|
155 |
+
"read_fwf",
|
156 |
+
"read_gbq",
|
157 |
+
"read_hdf",
|
158 |
+
"read_html",
|
159 |
+
"read_xml",
|
160 |
+
"read_json",
|
161 |
+
"read_pickle",
|
162 |
+
"read_sas",
|
163 |
+
"read_sql",
|
164 |
+
"read_sql_query",
|
165 |
+
"read_sql_table",
|
166 |
+
"read_stata",
|
167 |
+
"read_table",
|
168 |
+
"read_feather",
|
169 |
+
"read_parquet",
|
170 |
+
"read_orc",
|
171 |
+
"read_spss",
|
172 |
+
]
|
173 |
+
|
174 |
+
# top-level json funcs
|
175 |
+
funcs_json = ["json_normalize"]
|
176 |
+
|
177 |
+
# top-level to_* funcs
|
178 |
+
funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"]
|
179 |
+
|
180 |
+
# top-level to deprecate in the future
|
181 |
+
deprecated_funcs_in_future: list[str] = []
|
182 |
+
|
183 |
+
# these are already deprecated; awaiting removal
|
184 |
+
deprecated_funcs: list[str] = []
|
185 |
+
|
186 |
+
# private modules in pandas namespace
|
187 |
+
private_modules = [
|
188 |
+
"_config",
|
189 |
+
"_libs",
|
190 |
+
"_is_numpy_dev",
|
191 |
+
"_pandas_datetime_CAPI",
|
192 |
+
"_pandas_parser_CAPI",
|
193 |
+
"_testing",
|
194 |
+
"_typing",
|
195 |
+
]
|
196 |
+
if not pd._built_with_meson:
|
197 |
+
private_modules.append("_version")
|
198 |
+
|
199 |
+
def test_api(self):
|
200 |
+
checkthese = (
|
201 |
+
self.public_lib
|
202 |
+
+ self.private_lib
|
203 |
+
+ self.misc
|
204 |
+
+ self.modules
|
205 |
+
+ self.classes
|
206 |
+
+ self.funcs
|
207 |
+
+ self.funcs_option
|
208 |
+
+ self.funcs_read
|
209 |
+
+ self.funcs_json
|
210 |
+
+ self.funcs_to
|
211 |
+
+ self.private_modules
|
212 |
+
)
|
213 |
+
self.check(namespace=pd, expected=checkthese, ignored=self.ignored)
|
214 |
+
|
215 |
+
def test_api_all(self):
|
216 |
+
expected = set(
|
217 |
+
self.public_lib
|
218 |
+
+ self.misc
|
219 |
+
+ self.modules
|
220 |
+
+ self.classes
|
221 |
+
+ self.funcs
|
222 |
+
+ self.funcs_option
|
223 |
+
+ self.funcs_read
|
224 |
+
+ self.funcs_json
|
225 |
+
+ self.funcs_to
|
226 |
+
) - set(self.deprecated_classes)
|
227 |
+
actual = set(pd.__all__)
|
228 |
+
|
229 |
+
extraneous = actual - expected
|
230 |
+
assert not extraneous
|
231 |
+
|
232 |
+
missing = expected - actual
|
233 |
+
assert not missing
|
234 |
+
|
235 |
+
def test_depr(self):
|
236 |
+
deprecated_list = (
|
237 |
+
self.deprecated_classes
|
238 |
+
+ self.deprecated_funcs
|
239 |
+
+ self.deprecated_funcs_in_future
|
240 |
+
)
|
241 |
+
for depr in deprecated_list:
|
242 |
+
with tm.assert_produces_warning(FutureWarning):
|
243 |
+
_ = getattr(pd, depr)
|
244 |
+
|
245 |
+
|
246 |
+
class TestApi(Base):
|
247 |
+
allowed_api_dirs = [
|
248 |
+
"types",
|
249 |
+
"extensions",
|
250 |
+
"indexers",
|
251 |
+
"interchange",
|
252 |
+
"typing",
|
253 |
+
]
|
254 |
+
allowed_typing = [
|
255 |
+
"DataFrameGroupBy",
|
256 |
+
"DatetimeIndexResamplerGroupby",
|
257 |
+
"Expanding",
|
258 |
+
"ExpandingGroupby",
|
259 |
+
"ExponentialMovingWindow",
|
260 |
+
"ExponentialMovingWindowGroupby",
|
261 |
+
"JsonReader",
|
262 |
+
"NaTType",
|
263 |
+
"NAType",
|
264 |
+
"PeriodIndexResamplerGroupby",
|
265 |
+
"Resampler",
|
266 |
+
"Rolling",
|
267 |
+
"RollingGroupby",
|
268 |
+
"SeriesGroupBy",
|
269 |
+
"StataReader",
|
270 |
+
"TimedeltaIndexResamplerGroupby",
|
271 |
+
"TimeGrouper",
|
272 |
+
"Window",
|
273 |
+
]
|
274 |
+
allowed_api_types = [
|
275 |
+
"is_any_real_numeric_dtype",
|
276 |
+
"is_array_like",
|
277 |
+
"is_bool",
|
278 |
+
"is_bool_dtype",
|
279 |
+
"is_categorical_dtype",
|
280 |
+
"is_complex",
|
281 |
+
"is_complex_dtype",
|
282 |
+
"is_datetime64_any_dtype",
|
283 |
+
"is_datetime64_dtype",
|
284 |
+
"is_datetime64_ns_dtype",
|
285 |
+
"is_datetime64tz_dtype",
|
286 |
+
"is_dict_like",
|
287 |
+
"is_dtype_equal",
|
288 |
+
"is_extension_array_dtype",
|
289 |
+
"is_file_like",
|
290 |
+
"is_float",
|
291 |
+
"is_float_dtype",
|
292 |
+
"is_hashable",
|
293 |
+
"is_int64_dtype",
|
294 |
+
"is_integer",
|
295 |
+
"is_integer_dtype",
|
296 |
+
"is_interval",
|
297 |
+
"is_interval_dtype",
|
298 |
+
"is_iterator",
|
299 |
+
"is_list_like",
|
300 |
+
"is_named_tuple",
|
301 |
+
"is_number",
|
302 |
+
"is_numeric_dtype",
|
303 |
+
"is_object_dtype",
|
304 |
+
"is_period_dtype",
|
305 |
+
"is_re",
|
306 |
+
"is_re_compilable",
|
307 |
+
"is_scalar",
|
308 |
+
"is_signed_integer_dtype",
|
309 |
+
"is_sparse",
|
310 |
+
"is_string_dtype",
|
311 |
+
"is_timedelta64_dtype",
|
312 |
+
"is_timedelta64_ns_dtype",
|
313 |
+
"is_unsigned_integer_dtype",
|
314 |
+
"pandas_dtype",
|
315 |
+
"infer_dtype",
|
316 |
+
"union_categoricals",
|
317 |
+
"CategoricalDtype",
|
318 |
+
"DatetimeTZDtype",
|
319 |
+
"IntervalDtype",
|
320 |
+
"PeriodDtype",
|
321 |
+
]
|
322 |
+
allowed_api_interchange = ["from_dataframe", "DataFrame"]
|
323 |
+
allowed_api_indexers = [
|
324 |
+
"check_array_indexer",
|
325 |
+
"BaseIndexer",
|
326 |
+
"FixedForwardWindowIndexer",
|
327 |
+
"VariableOffsetWindowIndexer",
|
328 |
+
]
|
329 |
+
allowed_api_extensions = [
|
330 |
+
"no_default",
|
331 |
+
"ExtensionDtype",
|
332 |
+
"register_extension_dtype",
|
333 |
+
"register_dataframe_accessor",
|
334 |
+
"register_index_accessor",
|
335 |
+
"register_series_accessor",
|
336 |
+
"take",
|
337 |
+
"ExtensionArray",
|
338 |
+
"ExtensionScalarOpsMixin",
|
339 |
+
]
|
340 |
+
|
341 |
+
def test_api(self):
|
342 |
+
self.check(api, self.allowed_api_dirs)
|
343 |
+
|
344 |
+
def test_api_typing(self):
|
345 |
+
self.check(api_typing, self.allowed_typing)
|
346 |
+
|
347 |
+
def test_api_types(self):
|
348 |
+
self.check(api_types, self.allowed_api_types)
|
349 |
+
|
350 |
+
def test_api_interchange(self):
|
351 |
+
self.check(api_interchange, self.allowed_api_interchange)
|
352 |
+
|
353 |
+
def test_api_indexers(self):
|
354 |
+
self.check(api_indexers, self.allowed_api_indexers)
|
355 |
+
|
356 |
+
def test_api_extensions(self):
|
357 |
+
self.check(api_extensions, self.allowed_api_extensions)
|
358 |
+
|
359 |
+
|
360 |
+
class TestTesting(Base):
|
361 |
+
funcs = [
|
362 |
+
"assert_frame_equal",
|
363 |
+
"assert_series_equal",
|
364 |
+
"assert_index_equal",
|
365 |
+
"assert_extension_array_equal",
|
366 |
+
]
|
367 |
+
|
368 |
+
def test_testing(self):
|
369 |
+
from pandas import testing
|
370 |
+
|
371 |
+
self.check(testing, self.funcs)
|
372 |
+
|
373 |
+
def test_util_in_top_level(self):
|
374 |
+
with pytest.raises(AttributeError, match="foo"):
|
375 |
+
pd.util.foo
|
376 |
+
|
377 |
+
|
378 |
+
def test_pandas_array_alias():
|
379 |
+
msg = "PandasArray has been renamed NumpyExtensionArray"
|
380 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
381 |
+
res = pd.arrays.PandasArray
|
382 |
+
|
383 |
+
assert res is pd.arrays.NumpyExtensionArray
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_types.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import pandas._testing as tm
|
4 |
+
from pandas.api import types
|
5 |
+
from pandas.tests.api.test_api import Base
|
6 |
+
|
7 |
+
|
8 |
+
class TestTypes(Base):
|
9 |
+
allowed = [
|
10 |
+
"is_any_real_numeric_dtype",
|
11 |
+
"is_bool",
|
12 |
+
"is_bool_dtype",
|
13 |
+
"is_categorical_dtype",
|
14 |
+
"is_complex",
|
15 |
+
"is_complex_dtype",
|
16 |
+
"is_datetime64_any_dtype",
|
17 |
+
"is_datetime64_dtype",
|
18 |
+
"is_datetime64_ns_dtype",
|
19 |
+
"is_datetime64tz_dtype",
|
20 |
+
"is_dtype_equal",
|
21 |
+
"is_float",
|
22 |
+
"is_float_dtype",
|
23 |
+
"is_int64_dtype",
|
24 |
+
"is_integer",
|
25 |
+
"is_integer_dtype",
|
26 |
+
"is_number",
|
27 |
+
"is_numeric_dtype",
|
28 |
+
"is_object_dtype",
|
29 |
+
"is_scalar",
|
30 |
+
"is_sparse",
|
31 |
+
"is_string_dtype",
|
32 |
+
"is_signed_integer_dtype",
|
33 |
+
"is_timedelta64_dtype",
|
34 |
+
"is_timedelta64_ns_dtype",
|
35 |
+
"is_unsigned_integer_dtype",
|
36 |
+
"is_period_dtype",
|
37 |
+
"is_interval",
|
38 |
+
"is_interval_dtype",
|
39 |
+
"is_re",
|
40 |
+
"is_re_compilable",
|
41 |
+
"is_dict_like",
|
42 |
+
"is_iterator",
|
43 |
+
"is_file_like",
|
44 |
+
"is_list_like",
|
45 |
+
"is_hashable",
|
46 |
+
"is_array_like",
|
47 |
+
"is_named_tuple",
|
48 |
+
"pandas_dtype",
|
49 |
+
"union_categoricals",
|
50 |
+
"infer_dtype",
|
51 |
+
"is_extension_array_dtype",
|
52 |
+
]
|
53 |
+
deprecated: list[str] = []
|
54 |
+
dtypes = ["CategoricalDtype", "DatetimeTZDtype", "PeriodDtype", "IntervalDtype"]
|
55 |
+
|
56 |
+
def test_types(self):
|
57 |
+
self.check(types, self.allowed + self.dtypes + self.deprecated)
|
58 |
+
|
59 |
+
def test_deprecated_from_api_types(self):
|
60 |
+
for t in self.deprecated:
|
61 |
+
with tm.assert_produces_warning(FutureWarning):
|
62 |
+
getattr(types, t)(1)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (191 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/common.cpython-310.pyc
ADDED
Binary file (564 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply.cpython-310.pyc
ADDED
Binary file (57.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply_relabeling.cpython-310.pyc
ADDED
Binary file (3.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_transform.cpython-310.pyc
ADDED
Binary file (8.04 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_invalid_arg.cpython-310.pyc
ADDED
Binary file (11.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_numba.cpython-310.pyc
ADDED
Binary file (5.31 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply_relabeling.cpython-310.pyc
ADDED
Binary file (1.45 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_transform.cpython-310.pyc
ADDED
Binary file (3.24 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_str.cpython-310.pyc
ADDED
Binary file (7.25 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/common.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pandas.core.groupby.base import transformation_kernels
|
2 |
+
|
3 |
+
# There is no Series.cumcount or DataFrame.cumcount
|
4 |
+
series_transform_kernels = [
|
5 |
+
x for x in sorted(transformation_kernels) if x != "cumcount"
|
6 |
+
]
|
7 |
+
frame_transform_kernels = [x for x in sorted(transformation_kernels) if x != "cumcount"]
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply.py
ADDED
@@ -0,0 +1,1733 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from datetime import datetime
|
2 |
+
import warnings
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from pandas.core.dtypes.dtypes import CategoricalDtype
|
8 |
+
|
9 |
+
import pandas as pd
|
10 |
+
from pandas import (
|
11 |
+
DataFrame,
|
12 |
+
MultiIndex,
|
13 |
+
Series,
|
14 |
+
Timestamp,
|
15 |
+
date_range,
|
16 |
+
)
|
17 |
+
import pandas._testing as tm
|
18 |
+
from pandas.tests.frame.common import zip_frames
|
19 |
+
|
20 |
+
|
21 |
+
@pytest.fixture
|
22 |
+
def int_frame_const_col():
|
23 |
+
"""
|
24 |
+
Fixture for DataFrame of ints which are constant per column
|
25 |
+
|
26 |
+
Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3]
|
27 |
+
"""
|
28 |
+
df = DataFrame(
|
29 |
+
np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
|
30 |
+
columns=["A", "B", "C"],
|
31 |
+
)
|
32 |
+
return df
|
33 |
+
|
34 |
+
|
35 |
+
@pytest.fixture(params=["python", pytest.param("numba", marks=pytest.mark.single_cpu)])
|
36 |
+
def engine(request):
|
37 |
+
if request.param == "numba":
|
38 |
+
pytest.importorskip("numba")
|
39 |
+
return request.param
|
40 |
+
|
41 |
+
|
42 |
+
def test_apply(float_frame, engine, request):
|
43 |
+
if engine == "numba":
|
44 |
+
mark = pytest.mark.xfail(reason="numba engine not supporting numpy ufunc yet")
|
45 |
+
request.node.add_marker(mark)
|
46 |
+
with np.errstate(all="ignore"):
|
47 |
+
# ufunc
|
48 |
+
result = np.sqrt(float_frame["A"])
|
49 |
+
expected = float_frame.apply(np.sqrt, engine=engine)["A"]
|
50 |
+
tm.assert_series_equal(result, expected)
|
51 |
+
|
52 |
+
# aggregator
|
53 |
+
result = float_frame.apply(np.mean, engine=engine)["A"]
|
54 |
+
expected = np.mean(float_frame["A"])
|
55 |
+
assert result == expected
|
56 |
+
|
57 |
+
d = float_frame.index[0]
|
58 |
+
result = float_frame.apply(np.mean, axis=1, engine=engine)
|
59 |
+
expected = np.mean(float_frame.xs(d))
|
60 |
+
assert result[d] == expected
|
61 |
+
assert result.index is float_frame.index
|
62 |
+
|
63 |
+
|
64 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
65 |
+
@pytest.mark.parametrize("raw", [True, False])
|
66 |
+
def test_apply_args(float_frame, axis, raw, engine, request):
|
67 |
+
if engine == "numba":
|
68 |
+
mark = pytest.mark.xfail(reason="numba engine doesn't support args")
|
69 |
+
request.node.add_marker(mark)
|
70 |
+
result = float_frame.apply(
|
71 |
+
lambda x, y: x + y, axis, args=(1,), raw=raw, engine=engine
|
72 |
+
)
|
73 |
+
expected = float_frame + 1
|
74 |
+
tm.assert_frame_equal(result, expected)
|
75 |
+
|
76 |
+
|
77 |
+
def test_apply_categorical_func():
|
78 |
+
# GH 9573
|
79 |
+
df = DataFrame({"c0": ["A", "A", "B", "B"], "c1": ["C", "C", "D", "D"]})
|
80 |
+
result = df.apply(lambda ts: ts.astype("category"))
|
81 |
+
|
82 |
+
assert result.shape == (4, 2)
|
83 |
+
assert isinstance(result["c0"].dtype, CategoricalDtype)
|
84 |
+
assert isinstance(result["c1"].dtype, CategoricalDtype)
|
85 |
+
|
86 |
+
|
87 |
+
def test_apply_axis1_with_ea():
|
88 |
+
# GH#36785
|
89 |
+
expected = DataFrame({"A": [Timestamp("2013-01-01", tz="UTC")]})
|
90 |
+
result = expected.apply(lambda x: x, axis=1)
|
91 |
+
tm.assert_frame_equal(result, expected)
|
92 |
+
|
93 |
+
|
94 |
+
@pytest.mark.parametrize(
|
95 |
+
"data, dtype",
|
96 |
+
[(1, None), (1, CategoricalDtype([1])), (Timestamp("2013-01-01", tz="UTC"), None)],
|
97 |
+
)
|
98 |
+
def test_agg_axis1_duplicate_index(data, dtype):
|
99 |
+
# GH 42380
|
100 |
+
expected = DataFrame([[data], [data]], index=["a", "a"], dtype=dtype)
|
101 |
+
result = expected.agg(lambda x: x, axis=1)
|
102 |
+
tm.assert_frame_equal(result, expected)
|
103 |
+
|
104 |
+
|
105 |
+
def test_apply_mixed_datetimelike():
|
106 |
+
# mixed datetimelike
|
107 |
+
# GH 7778
|
108 |
+
expected = DataFrame(
|
109 |
+
{
|
110 |
+
"A": date_range("20130101", periods=3),
|
111 |
+
"B": pd.to_timedelta(np.arange(3), unit="s"),
|
112 |
+
}
|
113 |
+
)
|
114 |
+
result = expected.apply(lambda x: x, axis=1)
|
115 |
+
tm.assert_frame_equal(result, expected)
|
116 |
+
|
117 |
+
|
118 |
+
@pytest.mark.parametrize("func", [np.sqrt, np.mean])
|
119 |
+
def test_apply_empty(func, engine):
|
120 |
+
# empty
|
121 |
+
empty_frame = DataFrame()
|
122 |
+
|
123 |
+
result = empty_frame.apply(func, engine=engine)
|
124 |
+
assert result.empty
|
125 |
+
|
126 |
+
|
127 |
+
def test_apply_float_frame(float_frame, engine):
|
128 |
+
no_rows = float_frame[:0]
|
129 |
+
result = no_rows.apply(lambda x: x.mean(), engine=engine)
|
130 |
+
expected = Series(np.nan, index=float_frame.columns)
|
131 |
+
tm.assert_series_equal(result, expected)
|
132 |
+
|
133 |
+
no_cols = float_frame.loc[:, []]
|
134 |
+
result = no_cols.apply(lambda x: x.mean(), axis=1, engine=engine)
|
135 |
+
expected = Series(np.nan, index=float_frame.index)
|
136 |
+
tm.assert_series_equal(result, expected)
|
137 |
+
|
138 |
+
|
139 |
+
def test_apply_empty_except_index(engine):
|
140 |
+
# GH 2476
|
141 |
+
expected = DataFrame(index=["a"])
|
142 |
+
result = expected.apply(lambda x: x["a"], axis=1, engine=engine)
|
143 |
+
tm.assert_frame_equal(result, expected)
|
144 |
+
|
145 |
+
|
146 |
+
def test_apply_with_reduce_empty():
|
147 |
+
# reduce with an empty DataFrame
|
148 |
+
empty_frame = DataFrame()
|
149 |
+
|
150 |
+
x = []
|
151 |
+
result = empty_frame.apply(x.append, axis=1, result_type="expand")
|
152 |
+
tm.assert_frame_equal(result, empty_frame)
|
153 |
+
result = empty_frame.apply(x.append, axis=1, result_type="reduce")
|
154 |
+
expected = Series([], dtype=np.float64)
|
155 |
+
tm.assert_series_equal(result, expected)
|
156 |
+
|
157 |
+
empty_with_cols = DataFrame(columns=["a", "b", "c"])
|
158 |
+
result = empty_with_cols.apply(x.append, axis=1, result_type="expand")
|
159 |
+
tm.assert_frame_equal(result, empty_with_cols)
|
160 |
+
result = empty_with_cols.apply(x.append, axis=1, result_type="reduce")
|
161 |
+
expected = Series([], dtype=np.float64)
|
162 |
+
tm.assert_series_equal(result, expected)
|
163 |
+
|
164 |
+
# Ensure that x.append hasn't been called
|
165 |
+
assert x == []
|
166 |
+
|
167 |
+
|
168 |
+
@pytest.mark.parametrize("func", ["sum", "prod", "any", "all"])
|
169 |
+
def test_apply_funcs_over_empty(func):
|
170 |
+
# GH 28213
|
171 |
+
df = DataFrame(columns=["a", "b", "c"])
|
172 |
+
|
173 |
+
result = df.apply(getattr(np, func))
|
174 |
+
expected = getattr(df, func)()
|
175 |
+
if func in ("sum", "prod"):
|
176 |
+
expected = expected.astype(float)
|
177 |
+
tm.assert_series_equal(result, expected)
|
178 |
+
|
179 |
+
|
180 |
+
def test_nunique_empty():
|
181 |
+
# GH 28213
|
182 |
+
df = DataFrame(columns=["a", "b", "c"])
|
183 |
+
|
184 |
+
result = df.nunique()
|
185 |
+
expected = Series(0, index=df.columns)
|
186 |
+
tm.assert_series_equal(result, expected)
|
187 |
+
|
188 |
+
result = df.T.nunique()
|
189 |
+
expected = Series([], dtype=np.float64)
|
190 |
+
tm.assert_series_equal(result, expected)
|
191 |
+
|
192 |
+
|
193 |
+
def test_apply_standard_nonunique():
|
194 |
+
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
|
195 |
+
|
196 |
+
result = df.apply(lambda s: s[0], axis=1)
|
197 |
+
expected = Series([1, 4, 7], ["a", "a", "c"])
|
198 |
+
tm.assert_series_equal(result, expected)
|
199 |
+
|
200 |
+
result = df.T.apply(lambda s: s[0], axis=0)
|
201 |
+
tm.assert_series_equal(result, expected)
|
202 |
+
|
203 |
+
|
204 |
+
def test_apply_broadcast_scalars(float_frame):
|
205 |
+
# scalars
|
206 |
+
result = float_frame.apply(np.mean, result_type="broadcast")
|
207 |
+
expected = DataFrame([float_frame.mean()], index=float_frame.index)
|
208 |
+
tm.assert_frame_equal(result, expected)
|
209 |
+
|
210 |
+
|
211 |
+
def test_apply_broadcast_scalars_axis1(float_frame):
|
212 |
+
result = float_frame.apply(np.mean, axis=1, result_type="broadcast")
|
213 |
+
m = float_frame.mean(axis=1)
|
214 |
+
expected = DataFrame({c: m for c in float_frame.columns})
|
215 |
+
tm.assert_frame_equal(result, expected)
|
216 |
+
|
217 |
+
|
218 |
+
def test_apply_broadcast_lists_columns(float_frame):
|
219 |
+
# lists
|
220 |
+
result = float_frame.apply(
|
221 |
+
lambda x: list(range(len(float_frame.columns))),
|
222 |
+
axis=1,
|
223 |
+
result_type="broadcast",
|
224 |
+
)
|
225 |
+
m = list(range(len(float_frame.columns)))
|
226 |
+
expected = DataFrame(
|
227 |
+
[m] * len(float_frame.index),
|
228 |
+
dtype="float64",
|
229 |
+
index=float_frame.index,
|
230 |
+
columns=float_frame.columns,
|
231 |
+
)
|
232 |
+
tm.assert_frame_equal(result, expected)
|
233 |
+
|
234 |
+
|
235 |
+
def test_apply_broadcast_lists_index(float_frame):
|
236 |
+
result = float_frame.apply(
|
237 |
+
lambda x: list(range(len(float_frame.index))), result_type="broadcast"
|
238 |
+
)
|
239 |
+
m = list(range(len(float_frame.index)))
|
240 |
+
expected = DataFrame(
|
241 |
+
{c: m for c in float_frame.columns},
|
242 |
+
dtype="float64",
|
243 |
+
index=float_frame.index,
|
244 |
+
)
|
245 |
+
tm.assert_frame_equal(result, expected)
|
246 |
+
|
247 |
+
|
248 |
+
def test_apply_broadcast_list_lambda_func(int_frame_const_col):
|
249 |
+
# preserve columns
|
250 |
+
df = int_frame_const_col
|
251 |
+
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="broadcast")
|
252 |
+
tm.assert_frame_equal(result, df)
|
253 |
+
|
254 |
+
|
255 |
+
def test_apply_broadcast_series_lambda_func(int_frame_const_col):
|
256 |
+
df = int_frame_const_col
|
257 |
+
result = df.apply(
|
258 |
+
lambda x: Series([1, 2, 3], index=list("abc")),
|
259 |
+
axis=1,
|
260 |
+
result_type="broadcast",
|
261 |
+
)
|
262 |
+
expected = df.copy()
|
263 |
+
tm.assert_frame_equal(result, expected)
|
264 |
+
|
265 |
+
|
266 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
267 |
+
def test_apply_raw_float_frame(float_frame, axis, engine):
|
268 |
+
if engine == "numba":
|
269 |
+
pytest.skip("numba can't handle when UDF returns None.")
|
270 |
+
|
271 |
+
def _assert_raw(x):
|
272 |
+
assert isinstance(x, np.ndarray)
|
273 |
+
assert x.ndim == 1
|
274 |
+
|
275 |
+
float_frame.apply(_assert_raw, axis=axis, engine=engine, raw=True)
|
276 |
+
|
277 |
+
|
278 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
279 |
+
def test_apply_raw_float_frame_lambda(float_frame, axis, engine):
|
280 |
+
result = float_frame.apply(np.mean, axis=axis, engine=engine, raw=True)
|
281 |
+
expected = float_frame.apply(lambda x: x.values.mean(), axis=axis)
|
282 |
+
tm.assert_series_equal(result, expected)
|
283 |
+
|
284 |
+
|
285 |
+
def test_apply_raw_float_frame_no_reduction(float_frame, engine):
|
286 |
+
# no reduction
|
287 |
+
result = float_frame.apply(lambda x: x * 2, engine=engine, raw=True)
|
288 |
+
expected = float_frame * 2
|
289 |
+
tm.assert_frame_equal(result, expected)
|
290 |
+
|
291 |
+
|
292 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
293 |
+
def test_apply_raw_mixed_type_frame(axis, engine):
|
294 |
+
if engine == "numba":
|
295 |
+
pytest.skip("isinstance check doesn't work with numba")
|
296 |
+
|
297 |
+
def _assert_raw(x):
|
298 |
+
assert isinstance(x, np.ndarray)
|
299 |
+
assert x.ndim == 1
|
300 |
+
|
301 |
+
# Mixed dtype (GH-32423)
|
302 |
+
df = DataFrame(
|
303 |
+
{
|
304 |
+
"a": 1.0,
|
305 |
+
"b": 2,
|
306 |
+
"c": "foo",
|
307 |
+
"float32": np.array([1.0] * 10, dtype="float32"),
|
308 |
+
"int32": np.array([1] * 10, dtype="int32"),
|
309 |
+
},
|
310 |
+
index=np.arange(10),
|
311 |
+
)
|
312 |
+
df.apply(_assert_raw, axis=axis, engine=engine, raw=True)
|
313 |
+
|
314 |
+
|
315 |
+
def test_apply_axis1(float_frame):
|
316 |
+
d = float_frame.index[0]
|
317 |
+
result = float_frame.apply(np.mean, axis=1)[d]
|
318 |
+
expected = np.mean(float_frame.xs(d))
|
319 |
+
assert result == expected
|
320 |
+
|
321 |
+
|
322 |
+
def test_apply_mixed_dtype_corner():
|
323 |
+
df = DataFrame({"A": ["foo"], "B": [1.0]})
|
324 |
+
result = df[:0].apply(np.mean, axis=1)
|
325 |
+
# the result here is actually kind of ambiguous, should it be a Series
|
326 |
+
# or a DataFrame?
|
327 |
+
expected = Series(np.nan, index=pd.Index([], dtype="int64"))
|
328 |
+
tm.assert_series_equal(result, expected)
|
329 |
+
|
330 |
+
|
331 |
+
def test_apply_mixed_dtype_corner_indexing():
|
332 |
+
df = DataFrame({"A": ["foo"], "B": [1.0]})
|
333 |
+
result = df.apply(lambda x: x["A"], axis=1)
|
334 |
+
expected = Series(["foo"], index=[0])
|
335 |
+
tm.assert_series_equal(result, expected)
|
336 |
+
|
337 |
+
result = df.apply(lambda x: x["B"], axis=1)
|
338 |
+
expected = Series([1.0], index=[0])
|
339 |
+
tm.assert_series_equal(result, expected)
|
340 |
+
|
341 |
+
|
342 |
+
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
|
343 |
+
@pytest.mark.parametrize("ax", ["index", "columns"])
|
344 |
+
@pytest.mark.parametrize(
|
345 |
+
"func", [lambda x: x, lambda x: x.mean()], ids=["identity", "mean"]
|
346 |
+
)
|
347 |
+
@pytest.mark.parametrize("raw", [True, False])
|
348 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
349 |
+
def test_apply_empty_infer_type(ax, func, raw, axis, engine, request):
|
350 |
+
df = DataFrame(**{ax: ["a", "b", "c"]})
|
351 |
+
|
352 |
+
with np.errstate(all="ignore"):
|
353 |
+
test_res = func(np.array([], dtype="f8"))
|
354 |
+
is_reduction = not isinstance(test_res, np.ndarray)
|
355 |
+
|
356 |
+
result = df.apply(func, axis=axis, engine=engine, raw=raw)
|
357 |
+
if is_reduction:
|
358 |
+
agg_axis = df._get_agg_axis(axis)
|
359 |
+
assert isinstance(result, Series)
|
360 |
+
assert result.index is agg_axis
|
361 |
+
else:
|
362 |
+
assert isinstance(result, DataFrame)
|
363 |
+
|
364 |
+
|
365 |
+
def test_apply_empty_infer_type_broadcast():
|
366 |
+
no_cols = DataFrame(index=["a", "b", "c"])
|
367 |
+
result = no_cols.apply(lambda x: x.mean(), result_type="broadcast")
|
368 |
+
assert isinstance(result, DataFrame)
|
369 |
+
|
370 |
+
|
371 |
+
def test_apply_with_args_kwds_add_some(float_frame):
|
372 |
+
def add_some(x, howmuch=0):
|
373 |
+
return x + howmuch
|
374 |
+
|
375 |
+
result = float_frame.apply(add_some, howmuch=2)
|
376 |
+
expected = float_frame.apply(lambda x: x + 2)
|
377 |
+
tm.assert_frame_equal(result, expected)
|
378 |
+
|
379 |
+
|
380 |
+
def test_apply_with_args_kwds_agg_and_add(float_frame):
|
381 |
+
def agg_and_add(x, howmuch=0):
|
382 |
+
return x.mean() + howmuch
|
383 |
+
|
384 |
+
result = float_frame.apply(agg_and_add, howmuch=2)
|
385 |
+
expected = float_frame.apply(lambda x: x.mean() + 2)
|
386 |
+
tm.assert_series_equal(result, expected)
|
387 |
+
|
388 |
+
|
389 |
+
def test_apply_with_args_kwds_subtract_and_divide(float_frame):
|
390 |
+
def subtract_and_divide(x, sub, divide=1):
|
391 |
+
return (x - sub) / divide
|
392 |
+
|
393 |
+
result = float_frame.apply(subtract_and_divide, args=(2,), divide=2)
|
394 |
+
expected = float_frame.apply(lambda x: (x - 2.0) / 2.0)
|
395 |
+
tm.assert_frame_equal(result, expected)
|
396 |
+
|
397 |
+
|
398 |
+
def test_apply_yield_list(float_frame):
|
399 |
+
result = float_frame.apply(list)
|
400 |
+
tm.assert_frame_equal(result, float_frame)
|
401 |
+
|
402 |
+
|
403 |
+
def test_apply_reduce_Series(float_frame):
|
404 |
+
float_frame.iloc[::2, float_frame.columns.get_loc("A")] = np.nan
|
405 |
+
expected = float_frame.mean(1)
|
406 |
+
result = float_frame.apply(np.mean, axis=1)
|
407 |
+
tm.assert_series_equal(result, expected)
|
408 |
+
|
409 |
+
|
410 |
+
def test_apply_reduce_to_dict():
|
411 |
+
# GH 25196 37544
|
412 |
+
data = DataFrame([[1, 2], [3, 4]], columns=["c0", "c1"], index=["i0", "i1"])
|
413 |
+
|
414 |
+
result = data.apply(dict, axis=0)
|
415 |
+
expected = Series([{"i0": 1, "i1": 3}, {"i0": 2, "i1": 4}], index=data.columns)
|
416 |
+
tm.assert_series_equal(result, expected)
|
417 |
+
|
418 |
+
result = data.apply(dict, axis=1)
|
419 |
+
expected = Series([{"c0": 1, "c1": 2}, {"c0": 3, "c1": 4}], index=data.index)
|
420 |
+
tm.assert_series_equal(result, expected)
|
421 |
+
|
422 |
+
|
423 |
+
def test_apply_differently_indexed():
|
424 |
+
df = DataFrame(np.random.default_rng(2).standard_normal((20, 10)))
|
425 |
+
|
426 |
+
result = df.apply(Series.describe, axis=0)
|
427 |
+
expected = DataFrame({i: v.describe() for i, v in df.items()}, columns=df.columns)
|
428 |
+
tm.assert_frame_equal(result, expected)
|
429 |
+
|
430 |
+
result = df.apply(Series.describe, axis=1)
|
431 |
+
expected = DataFrame({i: v.describe() for i, v in df.T.items()}, columns=df.index).T
|
432 |
+
tm.assert_frame_equal(result, expected)
|
433 |
+
|
434 |
+
|
435 |
+
def test_apply_bug():
|
436 |
+
# GH 6125
|
437 |
+
positions = DataFrame(
|
438 |
+
[
|
439 |
+
[1, "ABC0", 50],
|
440 |
+
[1, "YUM0", 20],
|
441 |
+
[1, "DEF0", 20],
|
442 |
+
[2, "ABC1", 50],
|
443 |
+
[2, "YUM1", 20],
|
444 |
+
[2, "DEF1", 20],
|
445 |
+
],
|
446 |
+
columns=["a", "market", "position"],
|
447 |
+
)
|
448 |
+
|
449 |
+
def f(r):
|
450 |
+
return r["market"]
|
451 |
+
|
452 |
+
expected = positions.apply(f, axis=1)
|
453 |
+
|
454 |
+
positions = DataFrame(
|
455 |
+
[
|
456 |
+
[datetime(2013, 1, 1), "ABC0", 50],
|
457 |
+
[datetime(2013, 1, 2), "YUM0", 20],
|
458 |
+
[datetime(2013, 1, 3), "DEF0", 20],
|
459 |
+
[datetime(2013, 1, 4), "ABC1", 50],
|
460 |
+
[datetime(2013, 1, 5), "YUM1", 20],
|
461 |
+
[datetime(2013, 1, 6), "DEF1", 20],
|
462 |
+
],
|
463 |
+
columns=["a", "market", "position"],
|
464 |
+
)
|
465 |
+
result = positions.apply(f, axis=1)
|
466 |
+
tm.assert_series_equal(result, expected)
|
467 |
+
|
468 |
+
|
469 |
+
def test_apply_convert_objects():
|
470 |
+
expected = DataFrame(
|
471 |
+
{
|
472 |
+
"A": [
|
473 |
+
"foo",
|
474 |
+
"foo",
|
475 |
+
"foo",
|
476 |
+
"foo",
|
477 |
+
"bar",
|
478 |
+
"bar",
|
479 |
+
"bar",
|
480 |
+
"bar",
|
481 |
+
"foo",
|
482 |
+
"foo",
|
483 |
+
"foo",
|
484 |
+
],
|
485 |
+
"B": [
|
486 |
+
"one",
|
487 |
+
"one",
|
488 |
+
"one",
|
489 |
+
"two",
|
490 |
+
"one",
|
491 |
+
"one",
|
492 |
+
"one",
|
493 |
+
"two",
|
494 |
+
"two",
|
495 |
+
"two",
|
496 |
+
"one",
|
497 |
+
],
|
498 |
+
"C": [
|
499 |
+
"dull",
|
500 |
+
"dull",
|
501 |
+
"shiny",
|
502 |
+
"dull",
|
503 |
+
"dull",
|
504 |
+
"shiny",
|
505 |
+
"shiny",
|
506 |
+
"dull",
|
507 |
+
"shiny",
|
508 |
+
"shiny",
|
509 |
+
"shiny",
|
510 |
+
],
|
511 |
+
"D": np.random.default_rng(2).standard_normal(11),
|
512 |
+
"E": np.random.default_rng(2).standard_normal(11),
|
513 |
+
"F": np.random.default_rng(2).standard_normal(11),
|
514 |
+
}
|
515 |
+
)
|
516 |
+
|
517 |
+
result = expected.apply(lambda x: x, axis=1)
|
518 |
+
tm.assert_frame_equal(result, expected)
|
519 |
+
|
520 |
+
|
521 |
+
def test_apply_attach_name(float_frame):
|
522 |
+
result = float_frame.apply(lambda x: x.name)
|
523 |
+
expected = Series(float_frame.columns, index=float_frame.columns)
|
524 |
+
tm.assert_series_equal(result, expected)
|
525 |
+
|
526 |
+
|
527 |
+
def test_apply_attach_name_axis1(float_frame):
|
528 |
+
result = float_frame.apply(lambda x: x.name, axis=1)
|
529 |
+
expected = Series(float_frame.index, index=float_frame.index)
|
530 |
+
tm.assert_series_equal(result, expected)
|
531 |
+
|
532 |
+
|
533 |
+
def test_apply_attach_name_non_reduction(float_frame):
|
534 |
+
# non-reductions
|
535 |
+
result = float_frame.apply(lambda x: np.repeat(x.name, len(x)))
|
536 |
+
expected = DataFrame(
|
537 |
+
np.tile(float_frame.columns, (len(float_frame.index), 1)),
|
538 |
+
index=float_frame.index,
|
539 |
+
columns=float_frame.columns,
|
540 |
+
)
|
541 |
+
tm.assert_frame_equal(result, expected)
|
542 |
+
|
543 |
+
|
544 |
+
def test_apply_attach_name_non_reduction_axis1(float_frame):
|
545 |
+
result = float_frame.apply(lambda x: np.repeat(x.name, len(x)), axis=1)
|
546 |
+
expected = Series(
|
547 |
+
np.repeat(t[0], len(float_frame.columns)) for t in float_frame.itertuples()
|
548 |
+
)
|
549 |
+
expected.index = float_frame.index
|
550 |
+
tm.assert_series_equal(result, expected)
|
551 |
+
|
552 |
+
|
553 |
+
def test_apply_multi_index():
|
554 |
+
index = MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "d"]])
|
555 |
+
s = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["col1", "col2"])
|
556 |
+
result = s.apply(lambda x: Series({"min": min(x), "max": max(x)}), 1)
|
557 |
+
expected = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["min", "max"])
|
558 |
+
tm.assert_frame_equal(result, expected, check_like=True)
|
559 |
+
|
560 |
+
|
561 |
+
@pytest.mark.parametrize(
|
562 |
+
"df, dicts",
|
563 |
+
[
|
564 |
+
[
|
565 |
+
DataFrame([["foo", "bar"], ["spam", "eggs"]]),
|
566 |
+
Series([{0: "foo", 1: "spam"}, {0: "bar", 1: "eggs"}]),
|
567 |
+
],
|
568 |
+
[DataFrame([[0, 1], [2, 3]]), Series([{0: 0, 1: 2}, {0: 1, 1: 3}])],
|
569 |
+
],
|
570 |
+
)
|
571 |
+
def test_apply_dict(df, dicts):
|
572 |
+
# GH 8735
|
573 |
+
fn = lambda x: x.to_dict()
|
574 |
+
reduce_true = df.apply(fn, result_type="reduce")
|
575 |
+
reduce_false = df.apply(fn, result_type="expand")
|
576 |
+
reduce_none = df.apply(fn)
|
577 |
+
|
578 |
+
tm.assert_series_equal(reduce_true, dicts)
|
579 |
+
tm.assert_frame_equal(reduce_false, df)
|
580 |
+
tm.assert_series_equal(reduce_none, dicts)
|
581 |
+
|
582 |
+
|
583 |
+
def test_apply_non_numpy_dtype():
|
584 |
+
# GH 12244
|
585 |
+
df = DataFrame({"dt": date_range("2015-01-01", periods=3, tz="Europe/Brussels")})
|
586 |
+
result = df.apply(lambda x: x)
|
587 |
+
tm.assert_frame_equal(result, df)
|
588 |
+
|
589 |
+
result = df.apply(lambda x: x + pd.Timedelta("1day"))
|
590 |
+
expected = DataFrame(
|
591 |
+
{"dt": date_range("2015-01-02", periods=3, tz="Europe/Brussels")}
|
592 |
+
)
|
593 |
+
tm.assert_frame_equal(result, expected)
|
594 |
+
|
595 |
+
|
596 |
+
def test_apply_non_numpy_dtype_category():
|
597 |
+
df = DataFrame({"dt": ["a", "b", "c", "a"]}, dtype="category")
|
598 |
+
result = df.apply(lambda x: x)
|
599 |
+
tm.assert_frame_equal(result, df)
|
600 |
+
|
601 |
+
|
602 |
+
def test_apply_dup_names_multi_agg():
|
603 |
+
# GH 21063
|
604 |
+
df = DataFrame([[0, 1], [2, 3]], columns=["a", "a"])
|
605 |
+
expected = DataFrame([[0, 1]], columns=["a", "a"], index=["min"])
|
606 |
+
result = df.agg(["min"])
|
607 |
+
|
608 |
+
tm.assert_frame_equal(result, expected)
|
609 |
+
|
610 |
+
|
611 |
+
@pytest.mark.parametrize("op", ["apply", "agg"])
|
612 |
+
def test_apply_nested_result_axis_1(op):
|
613 |
+
# GH 13820
|
614 |
+
def apply_list(row):
|
615 |
+
return [2 * row["A"], 2 * row["C"], 2 * row["B"]]
|
616 |
+
|
617 |
+
df = DataFrame(np.zeros((4, 4)), columns=list("ABCD"))
|
618 |
+
result = getattr(df, op)(apply_list, axis=1)
|
619 |
+
expected = Series(
|
620 |
+
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
|
621 |
+
)
|
622 |
+
tm.assert_series_equal(result, expected)
|
623 |
+
|
624 |
+
|
625 |
+
def test_apply_noreduction_tzaware_object():
|
626 |
+
# https://github.com/pandas-dev/pandas/issues/31505
|
627 |
+
expected = DataFrame(
|
628 |
+
{"foo": [Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]"
|
629 |
+
)
|
630 |
+
result = expected.apply(lambda x: x)
|
631 |
+
tm.assert_frame_equal(result, expected)
|
632 |
+
result = expected.apply(lambda x: x.copy())
|
633 |
+
tm.assert_frame_equal(result, expected)
|
634 |
+
|
635 |
+
|
636 |
+
def test_apply_function_runs_once():
|
637 |
+
# https://github.com/pandas-dev/pandas/issues/30815
|
638 |
+
|
639 |
+
df = DataFrame({"a": [1, 2, 3]})
|
640 |
+
names = [] # Save row names function is applied to
|
641 |
+
|
642 |
+
def reducing_function(row):
|
643 |
+
names.append(row.name)
|
644 |
+
|
645 |
+
def non_reducing_function(row):
|
646 |
+
names.append(row.name)
|
647 |
+
return row
|
648 |
+
|
649 |
+
for func in [reducing_function, non_reducing_function]:
|
650 |
+
del names[:]
|
651 |
+
|
652 |
+
df.apply(func, axis=1)
|
653 |
+
assert names == list(df.index)
|
654 |
+
|
655 |
+
|
656 |
+
def test_apply_raw_function_runs_once(engine):
|
657 |
+
# https://github.com/pandas-dev/pandas/issues/34506
|
658 |
+
if engine == "numba":
|
659 |
+
pytest.skip("appending to list outside of numba func is not supported")
|
660 |
+
|
661 |
+
df = DataFrame({"a": [1, 2, 3]})
|
662 |
+
values = [] # Save row values function is applied to
|
663 |
+
|
664 |
+
def reducing_function(row):
|
665 |
+
values.extend(row)
|
666 |
+
|
667 |
+
def non_reducing_function(row):
|
668 |
+
values.extend(row)
|
669 |
+
return row
|
670 |
+
|
671 |
+
for func in [reducing_function, non_reducing_function]:
|
672 |
+
del values[:]
|
673 |
+
|
674 |
+
df.apply(func, engine=engine, raw=True, axis=1)
|
675 |
+
assert values == list(df.a.to_list())
|
676 |
+
|
677 |
+
|
678 |
+
def test_apply_with_byte_string():
|
679 |
+
# GH 34529
|
680 |
+
df = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"])
|
681 |
+
expected = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"], dtype=object)
|
682 |
+
# After we make the apply we expect a dataframe just
|
683 |
+
# like the original but with the object datatype
|
684 |
+
result = df.apply(lambda x: x.astype("object"))
|
685 |
+
tm.assert_frame_equal(result, expected)
|
686 |
+
|
687 |
+
|
688 |
+
@pytest.mark.parametrize("val", ["asd", 12, None, np.nan])
|
689 |
+
def test_apply_category_equalness(val):
|
690 |
+
# Check if categorical comparisons on apply, GH 21239
|
691 |
+
df_values = ["asd", None, 12, "asd", "cde", np.nan]
|
692 |
+
df = DataFrame({"a": df_values}, dtype="category")
|
693 |
+
|
694 |
+
result = df.a.apply(lambda x: x == val)
|
695 |
+
expected = Series(
|
696 |
+
[np.nan if pd.isnull(x) else x == val for x in df_values], name="a"
|
697 |
+
)
|
698 |
+
tm.assert_series_equal(result, expected)
|
699 |
+
|
700 |
+
|
701 |
+
# the user has supplied an opaque UDF where
|
702 |
+
# they are transforming the input that requires
|
703 |
+
# us to infer the output
|
704 |
+
|
705 |
+
|
706 |
+
def test_infer_row_shape():
|
707 |
+
# GH 17437
|
708 |
+
# if row shape is changing, infer it
|
709 |
+
df = DataFrame(np.random.default_rng(2).random((10, 2)))
|
710 |
+
result = df.apply(np.fft.fft, axis=0).shape
|
711 |
+
assert result == (10, 2)
|
712 |
+
|
713 |
+
result = df.apply(np.fft.rfft, axis=0).shape
|
714 |
+
assert result == (6, 2)
|
715 |
+
|
716 |
+
|
717 |
+
@pytest.mark.parametrize(
|
718 |
+
"ops, by_row, expected",
|
719 |
+
[
|
720 |
+
({"a": lambda x: x + 1}, "compat", DataFrame({"a": [2, 3]})),
|
721 |
+
({"a": lambda x: x + 1}, False, DataFrame({"a": [2, 3]})),
|
722 |
+
({"a": lambda x: x.sum()}, "compat", Series({"a": 3})),
|
723 |
+
({"a": lambda x: x.sum()}, False, Series({"a": 3})),
|
724 |
+
(
|
725 |
+
{"a": ["sum", np.sum, lambda x: x.sum()]},
|
726 |
+
"compat",
|
727 |
+
DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
|
728 |
+
),
|
729 |
+
(
|
730 |
+
{"a": ["sum", np.sum, lambda x: x.sum()]},
|
731 |
+
False,
|
732 |
+
DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
|
733 |
+
),
|
734 |
+
({"a": lambda x: 1}, "compat", DataFrame({"a": [1, 1]})),
|
735 |
+
({"a": lambda x: 1}, False, Series({"a": 1})),
|
736 |
+
],
|
737 |
+
)
|
738 |
+
def test_dictlike_lambda(ops, by_row, expected):
|
739 |
+
# GH53601
|
740 |
+
df = DataFrame({"a": [1, 2]})
|
741 |
+
result = df.apply(ops, by_row=by_row)
|
742 |
+
tm.assert_equal(result, expected)
|
743 |
+
|
744 |
+
|
745 |
+
@pytest.mark.parametrize(
|
746 |
+
"ops",
|
747 |
+
[
|
748 |
+
{"a": lambda x: x + 1},
|
749 |
+
{"a": lambda x: x.sum()},
|
750 |
+
{"a": ["sum", np.sum, lambda x: x.sum()]},
|
751 |
+
{"a": lambda x: 1},
|
752 |
+
],
|
753 |
+
)
|
754 |
+
def test_dictlike_lambda_raises(ops):
|
755 |
+
# GH53601
|
756 |
+
df = DataFrame({"a": [1, 2]})
|
757 |
+
with pytest.raises(ValueError, match="by_row=True not allowed"):
|
758 |
+
df.apply(ops, by_row=True)
|
759 |
+
|
760 |
+
|
761 |
+
def test_with_dictlike_columns():
|
762 |
+
# GH 17602
|
763 |
+
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
|
764 |
+
result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1)
|
765 |
+
expected = Series([{"s": 3} for t in df.itertuples()])
|
766 |
+
tm.assert_series_equal(result, expected)
|
767 |
+
|
768 |
+
df["tm"] = [
|
769 |
+
Timestamp("2017-05-01 00:00:00"),
|
770 |
+
Timestamp("2017-05-02 00:00:00"),
|
771 |
+
]
|
772 |
+
result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1)
|
773 |
+
tm.assert_series_equal(result, expected)
|
774 |
+
|
775 |
+
# compose a series
|
776 |
+
result = (df["a"] + df["b"]).apply(lambda x: {"s": x})
|
777 |
+
expected = Series([{"s": 3}, {"s": 3}])
|
778 |
+
tm.assert_series_equal(result, expected)
|
779 |
+
|
780 |
+
|
781 |
+
def test_with_dictlike_columns_with_datetime():
|
782 |
+
# GH 18775
|
783 |
+
df = DataFrame()
|
784 |
+
df["author"] = ["X", "Y", "Z"]
|
785 |
+
df["publisher"] = ["BBC", "NBC", "N24"]
|
786 |
+
df["date"] = pd.to_datetime(
|
787 |
+
["17-10-2010 07:15:30", "13-05-2011 08:20:35", "15-01-2013 09:09:09"],
|
788 |
+
dayfirst=True,
|
789 |
+
)
|
790 |
+
result = df.apply(lambda x: {}, axis=1)
|
791 |
+
expected = Series([{}, {}, {}])
|
792 |
+
tm.assert_series_equal(result, expected)
|
793 |
+
|
794 |
+
|
795 |
+
def test_with_dictlike_columns_with_infer():
|
796 |
+
# GH 17602
|
797 |
+
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
|
798 |
+
result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand")
|
799 |
+
expected = DataFrame({"s": [3, 3]})
|
800 |
+
tm.assert_frame_equal(result, expected)
|
801 |
+
|
802 |
+
df["tm"] = [
|
803 |
+
Timestamp("2017-05-01 00:00:00"),
|
804 |
+
Timestamp("2017-05-02 00:00:00"),
|
805 |
+
]
|
806 |
+
result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand")
|
807 |
+
tm.assert_frame_equal(result, expected)
|
808 |
+
|
809 |
+
|
810 |
+
@pytest.mark.parametrize(
|
811 |
+
"ops, by_row, expected",
|
812 |
+
[
|
813 |
+
([lambda x: x + 1], "compat", DataFrame({("a", "<lambda>"): [2, 3]})),
|
814 |
+
([lambda x: x + 1], False, DataFrame({("a", "<lambda>"): [2, 3]})),
|
815 |
+
([lambda x: x.sum()], "compat", DataFrame({"a": [3]}, index=["<lambda>"])),
|
816 |
+
([lambda x: x.sum()], False, DataFrame({"a": [3]}, index=["<lambda>"])),
|
817 |
+
(
|
818 |
+
["sum", np.sum, lambda x: x.sum()],
|
819 |
+
"compat",
|
820 |
+
DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
|
821 |
+
),
|
822 |
+
(
|
823 |
+
["sum", np.sum, lambda x: x.sum()],
|
824 |
+
False,
|
825 |
+
DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
|
826 |
+
),
|
827 |
+
(
|
828 |
+
[lambda x: x + 1, lambda x: 3],
|
829 |
+
"compat",
|
830 |
+
DataFrame([[2, 3], [3, 3]], columns=[["a", "a"], ["<lambda>", "<lambda>"]]),
|
831 |
+
),
|
832 |
+
(
|
833 |
+
[lambda x: 2, lambda x: 3],
|
834 |
+
False,
|
835 |
+
DataFrame({"a": [2, 3]}, ["<lambda>", "<lambda>"]),
|
836 |
+
),
|
837 |
+
],
|
838 |
+
)
|
839 |
+
def test_listlike_lambda(ops, by_row, expected):
|
840 |
+
# GH53601
|
841 |
+
df = DataFrame({"a": [1, 2]})
|
842 |
+
result = df.apply(ops, by_row=by_row)
|
843 |
+
tm.assert_equal(result, expected)
|
844 |
+
|
845 |
+
|
846 |
+
@pytest.mark.parametrize(
|
847 |
+
"ops",
|
848 |
+
[
|
849 |
+
[lambda x: x + 1],
|
850 |
+
[lambda x: x.sum()],
|
851 |
+
["sum", np.sum, lambda x: x.sum()],
|
852 |
+
[lambda x: x + 1, lambda x: 3],
|
853 |
+
],
|
854 |
+
)
|
855 |
+
def test_listlike_lambda_raises(ops):
|
856 |
+
# GH53601
|
857 |
+
df = DataFrame({"a": [1, 2]})
|
858 |
+
with pytest.raises(ValueError, match="by_row=True not allowed"):
|
859 |
+
df.apply(ops, by_row=True)
|
860 |
+
|
861 |
+
|
862 |
+
def test_with_listlike_columns():
|
863 |
+
# GH 17348
|
864 |
+
df = DataFrame(
|
865 |
+
{
|
866 |
+
"a": Series(np.random.default_rng(2).standard_normal(4)),
|
867 |
+
"b": ["a", "list", "of", "words"],
|
868 |
+
"ts": date_range("2016-10-01", periods=4, freq="h"),
|
869 |
+
}
|
870 |
+
)
|
871 |
+
|
872 |
+
result = df[["a", "b"]].apply(tuple, axis=1)
|
873 |
+
expected = Series([t[1:] for t in df[["a", "b"]].itertuples()])
|
874 |
+
tm.assert_series_equal(result, expected)
|
875 |
+
|
876 |
+
result = df[["a", "ts"]].apply(tuple, axis=1)
|
877 |
+
expected = Series([t[1:] for t in df[["a", "ts"]].itertuples()])
|
878 |
+
tm.assert_series_equal(result, expected)
|
879 |
+
|
880 |
+
|
881 |
+
def test_with_listlike_columns_returning_list():
|
882 |
+
# GH 18919
|
883 |
+
df = DataFrame({"x": Series([["a", "b"], ["q"]]), "y": Series([["z"], ["q", "t"]])})
|
884 |
+
df.index = MultiIndex.from_tuples([("i0", "j0"), ("i1", "j1")])
|
885 |
+
|
886 |
+
result = df.apply(lambda row: [el for el in row["x"] if el in row["y"]], axis=1)
|
887 |
+
expected = Series([[], ["q"]], index=df.index)
|
888 |
+
tm.assert_series_equal(result, expected)
|
889 |
+
|
890 |
+
|
891 |
+
def test_infer_output_shape_columns():
|
892 |
+
# GH 18573
|
893 |
+
|
894 |
+
df = DataFrame(
|
895 |
+
{
|
896 |
+
"number": [1.0, 2.0],
|
897 |
+
"string": ["foo", "bar"],
|
898 |
+
"datetime": [
|
899 |
+
Timestamp("2017-11-29 03:30:00"),
|
900 |
+
Timestamp("2017-11-29 03:45:00"),
|
901 |
+
],
|
902 |
+
}
|
903 |
+
)
|
904 |
+
result = df.apply(lambda row: (row.number, row.string), axis=1)
|
905 |
+
expected = Series([(t.number, t.string) for t in df.itertuples()])
|
906 |
+
tm.assert_series_equal(result, expected)
|
907 |
+
|
908 |
+
|
909 |
+
def test_infer_output_shape_listlike_columns():
|
910 |
+
# GH 16353
|
911 |
+
|
912 |
+
df = DataFrame(
|
913 |
+
np.random.default_rng(2).standard_normal((6, 3)), columns=["A", "B", "C"]
|
914 |
+
)
|
915 |
+
|
916 |
+
result = df.apply(lambda x: [1, 2, 3], axis=1)
|
917 |
+
expected = Series([[1, 2, 3] for t in df.itertuples()])
|
918 |
+
tm.assert_series_equal(result, expected)
|
919 |
+
|
920 |
+
result = df.apply(lambda x: [1, 2], axis=1)
|
921 |
+
expected = Series([[1, 2] for t in df.itertuples()])
|
922 |
+
tm.assert_series_equal(result, expected)
|
923 |
+
|
924 |
+
|
925 |
+
@pytest.mark.parametrize("val", [1, 2])
|
926 |
+
def test_infer_output_shape_listlike_columns_np_func(val):
|
927 |
+
# GH 17970
|
928 |
+
df = DataFrame({"a": [1, 2, 3]}, index=list("abc"))
|
929 |
+
|
930 |
+
result = df.apply(lambda row: np.ones(val), axis=1)
|
931 |
+
expected = Series([np.ones(val) for t in df.itertuples()], index=df.index)
|
932 |
+
tm.assert_series_equal(result, expected)
|
933 |
+
|
934 |
+
|
935 |
+
def test_infer_output_shape_listlike_columns_with_timestamp():
|
936 |
+
# GH 17892
|
937 |
+
df = DataFrame(
|
938 |
+
{
|
939 |
+
"a": [
|
940 |
+
Timestamp("2010-02-01"),
|
941 |
+
Timestamp("2010-02-04"),
|
942 |
+
Timestamp("2010-02-05"),
|
943 |
+
Timestamp("2010-02-06"),
|
944 |
+
],
|
945 |
+
"b": [9, 5, 4, 3],
|
946 |
+
"c": [5, 3, 4, 2],
|
947 |
+
"d": [1, 2, 3, 4],
|
948 |
+
}
|
949 |
+
)
|
950 |
+
|
951 |
+
def fun(x):
|
952 |
+
return (1, 2)
|
953 |
+
|
954 |
+
result = df.apply(fun, axis=1)
|
955 |
+
expected = Series([(1, 2) for t in df.itertuples()])
|
956 |
+
tm.assert_series_equal(result, expected)
|
957 |
+
|
958 |
+
|
959 |
+
@pytest.mark.parametrize("lst", [[1, 2, 3], [1, 2]])
def test_consistent_coerce_for_shapes(lst):
    """List results are not expanded to columns even when their length matches."""
    # we want column names to NOT be propagated
    # just because the shape matches the input shape
    df = DataFrame(
        np.random.default_rng(2).standard_normal((4, 3)), columns=["A", "B", "C"]
    )

    result = df.apply(lambda x: lst, axis=1)
    expected = Series([lst for t in df.itertuples()])
    tm.assert_series_equal(result, expected)
|
970 |
+
|
971 |
+
|
972 |
+
def test_consistent_names(int_frame_const_col):
    """When the applied func returns a Series, its index labels become the columns."""
    # if a Series is returned, we should use the resulting index names
    # NOTE(review): int_frame_const_col is a conftest fixture — presumably a
    # constant-valued frame with columns A/B/C; confirm against conftest.py.
    df = int_frame_const_col

    result = df.apply(
        lambda x: Series([1, 2, 3], index=["test", "other", "cols"]), axis=1
    )
    expected = int_frame_const_col.rename(
        columns={"A": "test", "B": "other", "C": "cols"}
    )
    tm.assert_frame_equal(result, expected)

    # shorter Series: only the returned labels survive
    result = df.apply(lambda x: Series([1, 2], index=["test", "other"]), axis=1)
    expected = expected[["test", "other"]]
    tm.assert_frame_equal(result, expected)
|
987 |
+
|
988 |
+
|
989 |
+
def test_result_type(int_frame_const_col):
    """result_type="expand" turns list results into integer-labelled columns."""
    # result_type should be consistent no matter which
    # path we take in the code
    df = int_frame_const_col

    result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand")
    expected = df.copy()
    expected.columns = [0, 1, 2]
    tm.assert_frame_equal(result, expected)
|
998 |
+
|
999 |
+
|
1000 |
+
def test_result_type_shorter_list(int_frame_const_col):
    """result_type="expand" with a shorter list yields only that many columns."""
    # result_type should be consistent no matter which
    # path we take in the code
    df = int_frame_const_col
    result = df.apply(lambda x: [1, 2], axis=1, result_type="expand")
    expected = df[["A", "B"]].copy()
    expected.columns = [0, 1]
    tm.assert_frame_equal(result, expected)
|
1008 |
+
|
1009 |
+
|
1010 |
+
def test_result_type_broadcast(int_frame_const_col, request, engine):
    """result_type="broadcast" keeps the original column labels for list results."""
    # result_type should be consistent no matter which
    # path we take in the code
    if engine == "numba":
        # list-returning callables are unsupported under the numba engine
        mark = pytest.mark.xfail(reason="numba engine doesn't support list return")
        request.node.add_marker(mark)
    df = int_frame_const_col
    # broadcast result
    result = df.apply(
        lambda x: [1, 2, 3], axis=1, result_type="broadcast", engine=engine
    )
    expected = df.copy()
    tm.assert_frame_equal(result, expected)
|
1023 |
+
|
1024 |
+
|
1025 |
+
def test_result_type_broadcast_series_func(int_frame_const_col, engine, request):
    """result_type="broadcast" discards the returned Series' own index labels."""
    # result_type should be consistent no matter which
    # path we take in the code
    if engine == "numba":
        mark = pytest.mark.xfail(
            reason="numba Series constructor only support ndarrays not list data"
        )
        request.node.add_marker(mark)
    df = int_frame_const_col
    columns = ["other", "col", "names"]
    result = df.apply(
        lambda x: Series([1, 2, 3], index=columns),
        axis=1,
        result_type="broadcast",
        engine=engine,
    )
    # broadcasting keeps the frame's original columns, not `columns`
    expected = df.copy()
    tm.assert_frame_equal(result, expected)
|
1043 |
+
|
1044 |
+
|
1045 |
+
def test_result_type_series_result(int_frame_const_col, engine, request):
    """A Series result indexed like the row reproduces the original frame."""
    # result_type should be consistent no matter which
    # path we take in the code
    if engine == "numba":
        mark = pytest.mark.xfail(
            reason="numba Series constructor only support ndarrays not list data"
        )
        request.node.add_marker(mark)
    df = int_frame_const_col
    # series result
    result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1, engine=engine)
    expected = df.copy()
    tm.assert_frame_equal(result, expected)
|
1058 |
+
|
1059 |
+
|
1060 |
+
def test_result_type_series_result_other_index(int_frame_const_col, engine, request):
    """A Series result with a different index relabels the result's columns."""
    # result_type should be consistent no matter which
    # path we take in the code

    if engine == "numba":
        mark = pytest.mark.xfail(
            reason="no support in numba Series constructor for list of columns"
        )
        request.node.add_marker(mark)
    df = int_frame_const_col
    # series result with other index
    columns = ["other", "col", "names"]
    result = df.apply(lambda x: Series([1, 2, 3], index=columns), axis=1, engine=engine)
    expected = df.copy()
    expected.columns = columns
    tm.assert_frame_equal(result, expected)
|
1076 |
+
|
1077 |
+
|
1078 |
+
@pytest.mark.parametrize(
    "box",
    [lambda x: list(x), lambda x: tuple(x), lambda x: np.array(x, dtype="int64")],
    ids=["list", "tuple", "array"],
)
def test_consistency_for_boxed(box, int_frame_const_col):
    """list/tuple/ndarray results behave identically, with and without expand."""
    # passing an array or list should not affect the output shape
    df = int_frame_const_col

    result = df.apply(lambda x: box([1, 2]), axis=1)
    expected = Series([box([1, 2]) for t in df.itertuples()])
    tm.assert_series_equal(result, expected)

    result = df.apply(lambda x: box([1, 2]), axis=1, result_type="expand")
    expected = int_frame_const_col[["A", "B"]].rename(columns={"A": 0, "B": 1})
    tm.assert_frame_equal(result, expected)
|
1094 |
+
|
1095 |
+
|
1096 |
+
def test_agg_transform(axis, float_frame):
    """apply with ufuncs / lists of ufuncs builds a MultiIndex on the other axis.

    NOTE(review): ``zip_frames`` is a module-level test helper (defined earlier
    in this file or in tests/apply/common) — presumably interleaves the frames
    along ``other_axis``; confirm against its definition.
    """
    other_axis = 1 if axis in {0, "index"} else 0

    with np.errstate(all="ignore"):
        f_abs = np.abs(float_frame)
        f_sqrt = np.sqrt(float_frame)

    # ufunc
    expected = f_sqrt.copy()
    result = float_frame.apply(np.sqrt, axis=axis)
    tm.assert_frame_equal(result, expected)

    # list-like
    result = float_frame.apply([np.sqrt], axis=axis)
    expected = f_sqrt.copy()
    if axis in {0, "index"}:
        expected.columns = MultiIndex.from_product([float_frame.columns, ["sqrt"]])
    else:
        expected.index = MultiIndex.from_product([float_frame.index, ["sqrt"]])
    tm.assert_frame_equal(result, expected)

    # multiple items in list
    # these are in the order as if we are applying both
    # functions per series and then concatting
    result = float_frame.apply([np.abs, np.sqrt], axis=axis)
    expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
    if axis in {0, "index"}:
        expected.columns = MultiIndex.from_product(
            [float_frame.columns, ["absolute", "sqrt"]]
        )
    else:
        expected.index = MultiIndex.from_product(
            [float_frame.index, ["absolute", "sqrt"]]
        )
    tm.assert_frame_equal(result, expected)
|
1131 |
+
|
1132 |
+
|
1133 |
+
def test_demo():
    """Demonstration: DataFrame.agg with a list of reducer names."""
    frame = DataFrame({"A": range(5), "B": 5})

    observed = frame.agg(["min", "max"])
    anticipated = DataFrame(
        {"A": [0, 4], "B": [5, 5]}, columns=["A", "B"], index=["min", "max"]
    )
    tm.assert_frame_equal(observed, anticipated)
|
1142 |
+
|
1143 |
+
|
1144 |
+
def test_demo_dict_agg():
    """Demonstration: DataFrame.agg with a per-column dict of reducer lists."""
    frame = DataFrame({"A": range(5), "B": 5})
    observed = frame.agg({"A": ["min", "max"], "B": ["sum", "max"]})
    anticipated = DataFrame(
        {"A": [4.0, 0.0, np.nan], "B": [5.0, np.nan, 25.0]},
        columns=["A", "B"],
        index=["max", "min", "sum"],
    )
    # row order of the dict-agg result is not guaranteed; align before comparing
    tm.assert_frame_equal(observed.reindex_like(anticipated), anticipated)
|
1154 |
+
|
1155 |
+
|
1156 |
+
def test_agg_with_name_as_column_name():
    """GH 36212: a column literally called "name" must not leak into result.name."""
    df = DataFrame({"name": ["foo", "bar"]})

    # frame-level agg: the resulting Series carries no name
    tm.assert_series_equal(df.agg({"name": "count"}), Series({"name": 2}))

    # series-level agg: the series' own name is preserved on the result
    tm.assert_series_equal(
        df["name"].agg({"name": "count"}), Series({"name": 2}, name="name")
    )
|
1170 |
+
|
1171 |
+
|
1172 |
+
def test_agg_multiple_mixed():
    """agg with several reducers on mixed dtypes; result rows follow arg order."""
    # GH 20909
    mdf = DataFrame(
        {
            "A": [1, 2, 3],
            "B": [1.0, 2.0, 3.0],
            "C": ["foo", "bar", "baz"],
        }
    )
    expected = DataFrame(
        {
            "A": [1, 6],
            "B": [1.0, 6.0],
            "C": ["bar", "foobarbaz"],
        },
        index=["min", "sum"],
    )
    # sorted index
    result = mdf.agg(["min", "sum"])
    tm.assert_frame_equal(result, expected)

    result = mdf[["C", "B", "A"]].agg(["sum", "min"])
    # GH40420: the result of .agg should have an index that is sorted
    # according to the arguments provided to agg.
    expected = expected[["C", "B", "A"]].reindex(["sum", "min"])
    tm.assert_frame_equal(result, expected)
|
1198 |
+
|
1199 |
+
|
1200 |
+
def test_agg_multiple_mixed_raises():
    """agg raises TypeError when a datetime column cannot support the reducers."""
    # GH 20909
    mdf = DataFrame(
        {
            "A": [1, 2, 3],
            "B": [1.0, 2.0, 3.0],
            "C": ["foo", "bar", "baz"],
            "D": date_range("20130101", periods=3),
        }
    )

    # sorted index
    msg = "does not support reduction"
    with pytest.raises(TypeError, match=msg):
        mdf.agg(["min", "sum"])

    with pytest.raises(TypeError, match=msg):
        mdf[["D", "C", "B", "A"]].agg(["sum", "min"])
|
1218 |
+
|
1219 |
+
|
1220 |
+
def test_agg_reduce(axis, float_frame):
    """agg with lists and dicts of reducers, along either axis of float_frame."""
    other_axis = 1 if axis in {0, "index"} else 0
    # two sorted labels from the non-aggregated axis
    name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values()

    # all reducers
    expected = pd.concat(
        [
            float_frame.mean(axis=axis),
            float_frame.max(axis=axis),
            float_frame.sum(axis=axis),
        ],
        axis=1,
    )
    expected.columns = ["mean", "max", "sum"]
    expected = expected.T if axis in {0, "index"} else expected

    result = float_frame.agg(["mean", "max", "sum"], axis=axis)
    tm.assert_frame_equal(result, expected)

    # dict input with scalars
    func = {name1: "mean", name2: "sum"}
    result = float_frame.agg(func, axis=axis)
    expected = Series(
        [
            float_frame.loc(other_axis)[name1].mean(),
            float_frame.loc(other_axis)[name2].sum(),
        ],
        index=[name1, name2],
    )
    tm.assert_series_equal(result, expected)

    # dict input with lists
    func = {name1: ["mean"], name2: ["sum"]}
    result = float_frame.agg(func, axis=axis)
    expected = DataFrame(
        {
            name1: Series([float_frame.loc(other_axis)[name1].mean()], index=["mean"]),
            name2: Series([float_frame.loc(other_axis)[name2].sum()], index=["sum"]),
        }
    )
    expected = expected.T if axis in {1, "columns"} else expected
    tm.assert_frame_equal(result, expected)

    # dict input with lists with multiple
    func = {name1: ["mean", "sum"], name2: ["sum", "max"]}
    result = float_frame.agg(func, axis=axis)
    expected = pd.concat(
        {
            name1: Series(
                [
                    float_frame.loc(other_axis)[name1].mean(),
                    float_frame.loc(other_axis)[name1].sum(),
                ],
                index=["mean", "sum"],
            ),
            name2: Series(
                [
                    float_frame.loc(other_axis)[name2].sum(),
                    float_frame.loc(other_axis)[name2].max(),
                ],
                index=["sum", "max"],
            ),
        },
        axis=1,
    )
    expected = expected.T if axis in {1, "columns"} else expected
    tm.assert_frame_equal(result, expected)
|
1287 |
+
|
1288 |
+
|
1289 |
+
def test_nuiscance_columns():
    """Nuisance (non-reducible) columns raise rather than being silently dropped.

    NOTE(review): function name has a typo ("nuiscance" for "nuisance"); kept
    because renaming would change the collected pytest test id.
    """
    # GH 15015
    df = DataFrame(
        {
            "A": [1, 2, 3],
            "B": [1.0, 2.0, 3.0],
            "C": ["foo", "bar", "baz"],
            "D": date_range("20130101", periods=3),
        }
    )

    result = df.agg("min")
    expected = Series([1, 1.0, "bar", Timestamp("20130101")], index=df.columns)
    tm.assert_series_equal(result, expected)

    result = df.agg(["min"])
    expected = DataFrame(
        [[1, 1.0, "bar", Timestamp("20130101").as_unit("ns")]],
        index=["min"],
        columns=df.columns,
    )
    tm.assert_frame_equal(result, expected)

    # "sum" is not defined for the datetime column -> raises
    msg = "does not support reduction"
    with pytest.raises(TypeError, match=msg):
        df.agg("sum")

    result = df[["A", "B", "C"]].agg("sum")
    expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"])
    tm.assert_series_equal(result, expected)

    msg = "does not support reduction"
    with pytest.raises(TypeError, match=msg):
        df.agg(["sum"])
|
1323 |
+
|
1324 |
+
|
1325 |
+
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_non_callable_aggregates(how):
    """String attribute names (e.g. "size") work as aggregations in agg/apply."""
    # GH 16405
    # 'size' is a property of frame/series
    # validate that this is working
    # GH 39116 - expand to apply
    df = DataFrame(
        {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
    )

    # Function aggregate
    result = getattr(df, how)({"A": "count"})
    expected = Series({"A": 2})

    tm.assert_series_equal(result, expected)

    # Non-function aggregate
    result = getattr(df, how)({"A": "size"})
    expected = Series({"A": 3})

    tm.assert_series_equal(result, expected)

    # Mix function and non-function aggs
    result1 = getattr(df, how)(["count", "size"])
    result2 = getattr(df, how)(
        {"A": ["count", "size"], "B": ["count", "size"], "C": ["count", "size"]}
    )
    expected = DataFrame(
        {
            "A": {"count": 2, "size": 3},
            "B": {"count": 2, "size": 3},
            "C": {"count": 2, "size": 3},
        }
    )

    tm.assert_frame_equal(result1, result2, check_like=True)
    tm.assert_frame_equal(result2, expected, check_like=True)

    # Just functional string arg is same as calling df.arg()
    result = getattr(df, how)("count")
    expected = df.count()

    tm.assert_series_equal(result, expected)
|
1368 |
+
|
1369 |
+
|
1370 |
+
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_size_as_str(how, axis):
    """agg/apply("size") reduces per column (axis 0) or per row (axis 1)."""
    # GH 39934
    df = DataFrame(
        {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
    )
    # Just a string attribute arg same as calling df.arg
    # on the columns
    result = getattr(df, how)("size", axis=axis)
    if axis in (0, "index"):
        expected = Series(df.shape[0], index=df.columns)
    else:
        expected = Series(df.shape[1], index=df.index)
    tm.assert_series_equal(result, expected)
|
1384 |
+
|
1385 |
+
|
1386 |
+
def test_agg_listlike_result():
    """User-defined aggregations may return list-likes per column."""
    # GH-29587 user defined function returning list-likes
    df = DataFrame({"A": [2, 2, 3], "B": [1.5, np.nan, 1.5], "C": ["foo", None, "bar"]})

    def func(group_col):
        return list(group_col.dropna().unique())

    result = df.agg(func)
    expected = Series([[2, 3], [1.5], ["foo", "bar"]], index=["A", "B", "C"])
    tm.assert_series_equal(result, expected)

    # wrapping in a list keeps one row per function, labelled by its __name__
    result = df.agg([func])
    expected = expected.to_frame("func").T
    tm.assert_frame_equal(result, expected)
|
1400 |
+
|
1401 |
+
|
1402 |
+
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize(
    "args, kwargs",
    [
        ((1, 2, 3), {}),
        ((8, 7, 15), {}),
        ((1, 2), {}),
        ((1,), {"b": 2}),
        ((), {"a": 1, "b": 2}),
        ((), {"a": 2, "b": 1}),
        ((), {"a": 1, "b": 2, "c": 3}),
    ],
)
def test_agg_args_kwargs(axis, args, kwargs):
    """Extra *args/**kwargs are forwarded to the aggregating function.

    Every parametrized combination satisfies (a + b) / c == 1, so the
    expected result is simply the axis sum plus 1.
    """
    def f(x, a, b, c=3):
        return x.sum() + (a + b) / c

    df = DataFrame([[1, 2], [3, 4]])

    if axis == 0:
        expected = Series([5.0, 7.0])
    else:
        expected = Series([4.0, 8.0])

    result = df.agg(f, axis, *args, **kwargs)

    tm.assert_series_equal(result, expected)
|
1429 |
+
|
1430 |
+
|
1431 |
+
@pytest.mark.parametrize("num_cols", [2, 3, 5])
def test_frequency_is_original(num_cols, engine, request):
    """apply must not mutate the frequency of the frame's DatetimeIndex."""
    # GH 22150
    if engine == "numba":
        mark = pytest.mark.xfail(reason="numba engine only supports numeric indices")
        request.node.add_marker(mark)
    index = pd.DatetimeIndex(["1950-06-30", "1952-10-24", "1953-05-29"])
    original = index.copy()
    df = DataFrame(1, index=index, columns=range(num_cols))
    df.apply(lambda x: x, engine=engine)
    assert index.freq == original.freq
|
1442 |
+
|
1443 |
+
|
1444 |
+
def test_apply_datetime_tz_issue(engine, request):
    """Row-wise apply over a tz-aware index preserves the Timestamp labels."""
    # GH 29052

    if engine == "numba":
        mark = pytest.mark.xfail(
            reason="numba engine doesn't support non-numeric indexes"
        )
        request.node.add_marker(mark)

    timestamps = [
        Timestamp("2019-03-15 12:34:31.909000+0000", tz="UTC"),
        Timestamp("2019-03-15 12:34:34.359000+0000", tz="UTC"),
        Timestamp("2019-03-15 12:34:34.660000+0000", tz="UTC"),
    ]
    df = DataFrame(data=[0, 1, 2], index=timestamps)
    result = df.apply(lambda x: x.name, axis=1, engine=engine)
    expected = Series(index=timestamps, data=timestamps)

    tm.assert_series_equal(result, expected)
|
1463 |
+
|
1464 |
+
|
1465 |
+
@pytest.mark.parametrize("df", [DataFrame({"A": ["a", None], "B": ["c", "d"]})])
@pytest.mark.parametrize("method", ["min", "max", "sum"])
def test_mixed_column_raises(df, method, using_infer_string):
    """Reductions on object-dtype string columns with None raise TypeError.

    Under the infer-string option the columns are proper string dtype and the
    reductions succeed, so only the legacy object path is expected to raise.
    """
    # GH 16832
    if method == "sum":
        msg = r'can only concatenate str \(not "int"\) to str|does not support'
    else:
        msg = "not supported between instances of 'str' and 'float'"
    if not using_infer_string:
        with pytest.raises(TypeError, match=msg):
            getattr(df, method)()
    else:
        getattr(df, method)()
|
1478 |
+
|
1479 |
+
|
1480 |
+
@pytest.mark.parametrize("col", [1, 1.0, True, "a", np.nan])
def test_apply_dtype(col):
    """GH 31466: applying ``s.dtype`` column-wise reproduces ``df.dtypes``."""
    frame = DataFrame([[1.0, col]], columns=["a", "b"])
    observed = frame.apply(lambda s: s.dtype)
    tm.assert_series_equal(observed, frame.dtypes)
|
1488 |
+
|
1489 |
+
|
1490 |
+
def test_apply_mutating(using_array_manager, using_copy_on_write, warn_copy_on_write):
    """Mutating rows inside apply: parent frame is untouched under CoW/AM."""
    # GH#35462 case where applied func pins a new BlockManager to a row
    df = DataFrame({"a": range(100), "b": range(100, 200)})
    df_orig = df.copy()

    def func(row):
        mgr = row._mgr
        row.loc["a"] += 1
        # the in-place mutation forces the row onto a fresh manager
        assert row._mgr is not mgr
        return row

    expected = df.copy()
    expected["a"] += 1

    with tm.assert_cow_warning(warn_copy_on_write):
        result = df.apply(func, axis=1)

    tm.assert_frame_equal(result, expected)
    if using_copy_on_write or using_array_manager:
        # INFO(CoW) With copy on write, mutating a viewing row doesn't mutate the parent
        # INFO(ArrayManager) With BlockManager, the row is a view and mutated in place,
        # with ArrayManager the row is not a view, and thus not mutated in place
        tm.assert_frame_equal(df, df_orig)
    else:
        tm.assert_frame_equal(df, result)
|
1515 |
+
|
1516 |
+
|
1517 |
+
def test_apply_empty_list_reduce():
    """GH#35683: result_type="reduce" keeps column labels as the result index."""
    frame = DataFrame(
        [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"]
    )

    reduced = frame.apply(lambda col: [], result_type="reduce")
    anticipated = Series({"a": [], "b": []}, dtype=object)
    tm.assert_series_equal(reduced, anticipated)
|
1524 |
+
|
1525 |
+
|
1526 |
+
def test_apply_no_suffix_index(engine, request):
    """Duplicate lambdas in a list arg keep the plain "<lambda>" index label."""
    # GH36189
    if engine == "numba":
        mark = pytest.mark.xfail(
            reason="numba engine doesn't support list-likes/dict-like callables"
        )
        request.node.add_marker(mark)
    pdf = DataFrame([[4, 9]] * 3, columns=["A", "B"])
    result = pdf.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], engine=engine)
    expected = DataFrame(
        {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"]
    )

    tm.assert_frame_equal(result, expected)
|
1540 |
+
|
1541 |
+
|
1542 |
+
def test_apply_raw_returns_string(engine):
    """raw=True apply may return strings (object dtype), not just numerics."""
    # https://github.com/pandas-dev/pandas/issues/35940
    if engine == "numba":
        pytest.skip("No object dtype support in numba")
    df = DataFrame({"A": ["aa", "bbb"]})
    result = df.apply(lambda x: x[0], engine=engine, axis=1, raw=True)
    expected = Series(["aa", "bbb"])
    tm.assert_series_equal(result, expected)
|
1550 |
+
|
1551 |
+
|
1552 |
+
def test_aggregation_func_column_order():
    """Result rows of .agg follow the order the functions were supplied in."""
    # GH40420: the result of .agg should have an index that is sorted
    # according to the arguments provided to agg.
    df = DataFrame(
        [
            (1, 0, 0),
            (2, 0, 0),
            (3, 0, 0),
            (4, 5, 4),
            (5, 6, 6),
            (6, 7, 7),
        ],
        columns=("att1", "att2", "att3"),
    )

    def sum_div2(s):
        return s.sum() / 2

    aggs = ["sum", sum_div2, "count", "min"]
    result = df.agg(aggs)
    expected = DataFrame(
        {
            "att1": [21.0, 10.5, 6.0, 1.0],
            "att2": [18.0, 9.0, 6.0, 0.0],
            "att3": [17.0, 8.5, 6.0, 0.0],
        },
        index=["sum", "sum_div2", "count", "min"],
    )
    tm.assert_frame_equal(result, expected)
|
1581 |
+
|
1582 |
+
|
1583 |
+
def test_apply_getitem_axis_1(engine, request):
    """Row-wise apply works on a frame selected with duplicate column labels."""
    # GH 13427
    if engine == "numba":
        mark = pytest.mark.xfail(
            reason="numba engine not supporting duplicate index values"
        )
        request.node.add_marker(mark)
    df = DataFrame({"a": [0, 1, 2], "b": [1, 2, 3]})
    result = df[["a", "a"]].apply(
        lambda x: x.iloc[0] + x.iloc[1], axis=1, engine=engine
    )
    expected = Series([0, 2, 4])
    tm.assert_series_equal(result, expected)
|
1596 |
+
|
1597 |
+
|
1598 |
+
def test_nuisance_depr_passes_through_warnings():
    """Warnings raised by user functions must reach the user exactly once."""
    # GH 43740
    # DataFrame.agg with list-likes may emit warnings for both individual
    # args and for entire columns, but we only want to emit once. We
    # catch and suppress the warnings for individual args, but need to make
    # sure if some other warnings were raised, they get passed through to
    # the user.

    def expected_warning(x):
        warnings.warn("Hello, World!")
        return x.sum()

    df = DataFrame({"a": [1, 2, 3]})
    with tm.assert_produces_warning(UserWarning, match="Hello, World!"):
        df.agg([expected_warning])
|
1613 |
+
|
1614 |
+
|
1615 |
+
def test_apply_type():
    """GH 46719: ``type`` applied along either axis reports Series per slice."""
    frame = DataFrame(
        {"col1": [3, "string", float], "col2": [0.25, datetime(2020, 1, 1), np.nan]},
        index=["a", "b", "c"],
    )

    # column-wise: one entry per column label
    tm.assert_series_equal(
        frame.apply(type, axis=0), Series({"col1": Series, "col2": Series})
    )

    # row-wise: one entry per index label
    tm.assert_series_equal(
        frame.apply(type, axis=1), Series({"a": Series, "b": Series, "c": Series})
    )
|
1631 |
+
|
1632 |
+
|
1633 |
+
def test_apply_on_empty_dataframe(engine):
    """Row-wise apply on a zero-row frame returns an empty float64 Series."""
    # GH 39111
    df = DataFrame({"a": [1, 2], "b": [3, 0]})
    result = df.head(0).apply(lambda x: max(x["a"], x["b"]), axis=1, engine=engine)
    expected = Series([], dtype=np.float64)
    tm.assert_series_equal(result, expected)
|
1639 |
+
|
1640 |
+
|
1641 |
+
def test_apply_return_list():
    """Column-wise apply returning a singleton list yields a one-row frame."""
    frame = DataFrame({"a": [1, 2], "b": [2, 3]})
    observed = frame.apply(lambda col: [col.values])
    anticipated = DataFrame({"a": [[1, 2]], "b": [[2, 3]]})
    tm.assert_frame_equal(observed, anticipated)
|
1646 |
+
|
1647 |
+
|
1648 |
+
@pytest.mark.parametrize(
    "test, constant",
    [
        ({"a": [1, 2, 3], "b": [1, 1, 1]}, {"a": [1, 2, 3], "b": [1]}),
        ({"a": [2, 2, 2], "b": [1, 1, 1]}, {"a": [2], "b": [1]}),
    ],
)
def test_unique_agg_type_is_series(test, constant):
    """GH#22558: agg({col: "unique"}) returns an object-dtype Series."""
    frame = DataFrame(test)
    anticipated = Series(data=constant, index=["a", "b"], dtype="object")
    observed = frame.agg({"a": "unique", "b": "unique"})
    tm.assert_series_equal(observed, anticipated)
|
1664 |
+
|
1665 |
+
|
1666 |
+
def test_any_apply_keyword_non_zero_axis_regression():
    """apply("any", axis=1) (keyword or positional) matches df.any(axis=1)."""
    # https://github.com/pandas-dev/pandas/issues/48656
    df = DataFrame({"A": [1, 2, 0], "B": [0, 2, 0], "C": [0, 0, 0]})
    expected = Series([True, True, False])
    tm.assert_series_equal(df.any(axis=1), expected)

    result = df.apply("any", axis=1)
    tm.assert_series_equal(result, expected)

    # positional axis was the regression in GH#48656
    result = df.apply("any", 1)
    tm.assert_series_equal(result, expected)
|
1677 |
+
|
1678 |
+
|
1679 |
+
def test_agg_mapping_func_deprecated():
    """Passing *args/**kwargs through agg with list/dict funcs warns (GH 53325)."""
    # GH 53325
    df = DataFrame({"x": [1, 2, 3]})

    def foo1(x, a=1, c=0):
        return x + a + c

    def foo2(x, b=2, c=0):
        return x + b + c

    # single func already takes the vectorized path
    result = df.agg(foo1, 0, 3, c=4)
    expected = df + 7
    tm.assert_frame_equal(result, expected)

    msg = "using .+ in Series.agg cannot aggregate and"

    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.agg([foo1, foo2], 0, 3, c=4)
    expected = DataFrame(
        [[8, 8], [9, 9], [10, 10]], columns=[["x", "x"], ["foo1", "foo2"]]
    )
    tm.assert_frame_equal(result, expected)

    # TODO: the result below is wrong, should be fixed (GH53325)
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.agg({"x": foo1}, 0, 3, c=4)
    expected = DataFrame([2, 3, 4], columns=["x"])
    tm.assert_frame_equal(result, expected)
|
1708 |
+
|
1709 |
+
|
1710 |
+
def test_agg_std():
    """agg(np.std) maps to the pandas std (ddof=1) and warns about the alias."""
    df = DataFrame(np.arange(6).reshape(3, 2), columns=["A", "B"])

    with tm.assert_produces_warning(FutureWarning, match="using DataFrame.std"):
        result = df.agg(np.std)
    expected = Series({"A": 2.0, "B": 2.0}, dtype=float)
    tm.assert_series_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, match="using Series.std"):
        result = df.agg([np.std])
    expected = DataFrame({"A": 2.0, "B": 2.0}, index=["std"])
    tm.assert_frame_equal(result, expected)
|
1722 |
+
|
1723 |
+
|
1724 |
+
def test_agg_dist_like_and_nonunique_columns():
    """Dict-style agg works when the targeted label matches duplicate columns."""
    # GH#51099
    df = DataFrame(
        {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
    )
    # make "A" a duplicated column label
    df.columns = ["A", "A", "C"]

    result = df.agg({"A": "count"})
    expected = df["A"].count()
    tm.assert_series_equal(result, expected)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas.compat.numpy import np_version_gte1p25
|
5 |
+
|
6 |
+
import pandas as pd
|
7 |
+
import pandas._testing as tm
|
8 |
+
|
9 |
+
|
10 |
+
def test_agg_relabel():
    """GH 26513: keyword-based relabeling in DataFrame.agg."""
    frame = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})

    # simplest case: one column, one func
    tm.assert_frame_equal(
        frame.agg(foo=("B", "sum")),
        pd.DataFrame({"B": [10]}, index=pd.Index(["foo"])),
    )

    # same column aggregated with two different methods
    tm.assert_frame_equal(
        frame.agg(foo=("B", "sum"), bar=("B", "min")),
        pd.DataFrame({"B": [10, 1]}, index=pd.Index(["foo", "bar"])),
    )
|
24 |
+
|
25 |
+
|
26 |
+
def test_agg_relabel_multi_columns_multi_methods():
    """Relabeling agg across several columns fills non-targeted cells with NaN."""
    # GH 26513, test on multiple columns with multiple methods
    df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
    result = df.agg(
        foo=("A", "sum"),
        bar=("B", "mean"),
        cat=("A", "min"),
        dat=("B", "max"),
        f=("A", "max"),
        g=("C", "min"),
    )
    expected = pd.DataFrame(
        {
            "A": [6.0, np.nan, 1.0, np.nan, 2.0, np.nan],
            "B": [np.nan, 2.5, np.nan, 4.0, np.nan, np.nan],
            "C": [np.nan, np.nan, np.nan, np.nan, np.nan, 3.0],
        },
        index=pd.Index(["foo", "bar", "cat", "dat", "f", "g"]),
    )
    tm.assert_frame_equal(result, expected)
|
46 |
+
|
47 |
+
|
48 |
+
@pytest.mark.xfail(np_version_gte1p25, reason="name of min now equals name of np.min")
def test_agg_relabel_partial_functions():
    """Relabeling agg accepts builtins, numpy funcs and lambdas as aggfuncs."""
    # GH 26513, test on partial, functools or more complex cases
    df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
    msg = "using Series.[mean|min]"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.agg(foo=("A", np.mean), bar=("A", "mean"), cat=("A", min))
    expected = pd.DataFrame(
        {"A": [1.5, 1.5, 1.0]}, index=pd.Index(["foo", "bar", "cat"])
    )
    tm.assert_frame_equal(result, expected)

    msg = "using Series.[mean|min|max|sum]"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.agg(
            foo=("A", min),
            bar=("A", np.min),
            cat=("B", max),
            dat=("C", "min"),
            f=("B", np.sum),
            kk=("B", lambda x: min(x)),
        )
    expected = pd.DataFrame(
        {
            "A": [1.0, 1.0, np.nan, np.nan, np.nan, np.nan],
            "B": [np.nan, np.nan, 4.0, np.nan, 10.0, 1.0],
            "C": [np.nan, np.nan, np.nan, 3.0, np.nan, np.nan],
        },
        index=pd.Index(["foo", "bar", "cat", "dat", "f", "kk"]),
    )
    tm.assert_frame_equal(result, expected)
|
79 |
+
|
80 |
+
|
81 |
+
def test_agg_namedtuple():
    # GH 26513: pd.NamedAgg works positionally and by keyword in DataFrame.agg.
    frame = pd.DataFrame({"A": [0, 1], "B": [1, 2]})

    res = frame.agg(
        foo=pd.NamedAgg("B", "sum"),
        bar=pd.NamedAgg("B", "min"),
        cat=pd.NamedAgg(column="B", aggfunc="count"),
        fft=pd.NamedAgg("B", aggfunc="max"),
    )
    exp = pd.DataFrame(
        {"B": [3, 1, 2, 2]}, index=pd.Index(["foo", "bar", "cat", "fft"])
    )
    tm.assert_frame_equal(res, exp)

    res = frame.agg(
        foo=pd.NamedAgg("A", "min"),
        bar=pd.NamedAgg(column="B", aggfunc="max"),
        cat=pd.NamedAgg(column="A", aggfunc="max"),
    )
    exp = pd.DataFrame(
        {"A": [0.0, np.nan, 1.0], "B": [np.nan, 2.0, np.nan]},
        index=pd.Index(["foo", "bar", "cat"]),
    )
    tm.assert_frame_equal(res, exp)
|
106 |
+
|
107 |
+
|
108 |
+
def test_reconstruct_func():
    # GH 28472: reconstruct_func must stay importable from pd.core.apply;
    # other libraries (e.g. dask) rely on this location.
    res = pd.core.apply.reconstruct_func("min")
    tm.assert_equal(res, (False, "min", None, None))
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_transform.py
ADDED
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas import (
|
5 |
+
DataFrame,
|
6 |
+
MultiIndex,
|
7 |
+
Series,
|
8 |
+
)
|
9 |
+
import pandas._testing as tm
|
10 |
+
from pandas.tests.apply.common import frame_transform_kernels
|
11 |
+
from pandas.tests.frame.common import zip_frames
|
12 |
+
|
13 |
+
|
14 |
+
def unpack_obj(obj, klass, axis):
    """
    Helper to ensure we have the right type of object for a test parametrized
    over frame_or_series: DataFrames pass through, otherwise column "A" is
    extracted (only meaningful for axis=0).
    """
    if klass is DataFrame:
        return obj
    if axis != 0:
        pytest.skip(f"Test is only for DataFrame with axis={axis}")
    return obj["A"]
|
24 |
+
|
25 |
+
|
26 |
+
def test_transform_ufunc(axis, float_frame, frame_or_series):
    # GH 35964: transform with a numpy ufunc matches applying it directly.
    obj = unpack_obj(float_frame, frame_or_series, axis)

    with np.errstate(all="ignore"):
        expected = np.sqrt(obj)

    result = obj.transform(np.sqrt, axis=axis)
    tm.assert_equal(result, expected)
|
37 |
+
|
38 |
+
|
39 |
+
@pytest.mark.parametrize(
    "ops, names",
    [
        ([np.sqrt], ["sqrt"]),
        ([np.abs, np.sqrt], ["absolute", "sqrt"]),
        (np.array([np.sqrt]), ["sqrt"]),
        (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
    ],
)
def test_transform_listlike(axis, float_frame, ops, names):
    # GH 35964: list of functions produces a MultiIndex on the "other" axis.
    concat_axis = 0 if axis in {1, "columns"} else 1
    with np.errstate(all="ignore"):
        pieces = [op(float_frame) for op in ops]
        expected = zip_frames(pieces, axis=concat_axis)
    if concat_axis == 1:
        expected.columns = MultiIndex.from_product([float_frame.columns, names])
    else:
        expected.index = MultiIndex.from_product([float_frame.index, names])
    result = float_frame.transform(ops, axis=axis)
    tm.assert_frame_equal(result, expected)
|
59 |
+
|
60 |
+
|
61 |
+
@pytest.mark.parametrize("ops", [[], np.array([])])
def test_transform_empty_listlike(float_frame, ops, frame_or_series):
    # An empty list of transforms is rejected outright.
    obj = unpack_obj(float_frame, frame_or_series, 0)

    with pytest.raises(ValueError, match="No transform functions were provided"):
        obj.transform(ops)
|
67 |
+
|
68 |
+
|
69 |
+
def test_transform_listlike_func_with_args():
    # GH 50624: positional/keyword args are forwarded to every listed func.
    frame = DataFrame({"x": [1, 2, 3]})

    def foo1(x, a=1, c=0):
        return x + a + c

    def foo2(x, b=2, c=0):
        return x + b + c

    # A kwarg unknown to one of the functions propagates as a TypeError.
    msg = r"foo1\(\) got an unexpected keyword argument 'b'"
    with pytest.raises(TypeError, match=msg):
        frame.transform([foo1, foo2], 0, 3, b=3, c=4)

    result = frame.transform([foo1, foo2], 0, 3, c=4)
    expected = DataFrame(
        [[8, 8], [9, 9], [10, 10]],
        columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
    )
    tm.assert_frame_equal(result, expected)
|
89 |
+
|
90 |
+
|
91 |
+
@pytest.mark.parametrize("box", [dict, Series])
def test_transform_dictlike(axis, float_frame, box):
    # GH 35964: dict/Series of {label: func} selects along the given axis.
    if axis in (0, "index"):
        label = float_frame.columns[0]
        expected = float_frame[[label]].transform(np.abs)
    else:
        label = float_frame.index[0]
        expected = float_frame.iloc[[0]].transform(np.abs)
    result = float_frame.transform(box({label: np.abs}), axis=axis)
    tm.assert_frame_equal(result, expected)
|
102 |
+
|
103 |
+
|
104 |
+
def test_transform_dictlike_mixed():
    # GH 40018: dictionary values may mix lists and scalars of funcs.
    frame = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]})
    result = frame.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
    expected = DataFrame(
        [[1.0, 1, 1.0], [2.0, 4, 2.0]],
        columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),
    )
    tm.assert_frame_equal(result, expected)
|
113 |
+
|
114 |
+
|
115 |
+
@pytest.mark.parametrize(
    "ops",
    [
        {},
        {"A": []},
        {"A": [], "B": "cumsum"},
        {"A": "cumsum", "B": []},
        {"A": [], "B": ["cumsum"]},
        {"A": ["cumsum"], "B": []},
    ],
)
def test_transform_empty_dictlike(float_frame, ops, frame_or_series):
    # A dict with any empty func list (or no entries) is rejected.
    obj = unpack_obj(float_frame, frame_or_series, 0)

    with pytest.raises(ValueError, match="No transform functions were provided"):
        obj.transform(ops)
|
131 |
+
|
132 |
+
|
133 |
+
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_udf(axis, float_frame, use_apply, frame_or_series):
    # GH 35964: a UDF runs either via apply or on the whole object; both paths
    # are exercised by forcing a fallback from one to the other.
    obj = unpack_obj(float_frame, frame_or_series, axis)

    def func(x):
        # transform is using apply iff x is not a DataFrame
        if use_apply == isinstance(x, frame_or_series):
            # Force transform to fall back to the other code path.
            raise ValueError
        return x + 1

    result = obj.transform(func, axis=axis)
    tm.assert_equal(result, obj + 1)
|
149 |
+
|
150 |
+
|
151 |
+
# Transform kernels that are no-ops on bad dtypes and therefore won't raise.
wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"]
frame_kernels_raise = [kernel for kernel in frame_transform_kernels if kernel not in wont_fail]
|
153 |
+
|
154 |
+
|
155 |
+
@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1])
def test_transform_bad_dtype(op, frame_or_series, request):
    # GH 35964: every container shape of ops raises on an all-object column.
    if op == "ngroup":
        request.applymarker(
            pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
        )

    # DataFrame of `object` (the type itself) fails on most transforms.
    obj = tm.get_obj(DataFrame({"A": 3 * [object]}), frame_or_series)
    error = TypeError
    msg = "|".join(
        [
            "not supported between instances of 'type' and 'type'",
            "unsupported operand type",
        ]
    )

    for spec in (op, [op], {"A": op}, {"A": [op]}):
        with pytest.raises(error, match=msg):
            obj.transform(spec)
|
181 |
+
|
182 |
+
|
183 |
+
@pytest.mark.parametrize("op", frame_kernels_raise)
def test_transform_failure_typeerror(request, op):
    # GH 35964: TypeError from one column propagates for every op container.
    if op == "ngroup":
        request.applymarker(
            pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
        )

    # Using object makes most transform kernels fail
    frame = DataFrame({"A": 3 * [object], "B": [1, 2, 3]})
    error = TypeError
    msg = "|".join(
        [
            "not supported between instances of 'type' and 'type'",
            "unsupported operand type",
        ]
    )

    for spec in ([op], {"A": op, "B": op}, {"A": [op], "B": [op]}, {"A": [op, "shift"], "B": [op]}):
        with pytest.raises(error, match=msg):
            frame.transform(spec)
|
213 |
+
|
214 |
+
|
215 |
+
def test_transform_failure_valueerror():
    # GH 40211: a ValueError raised inside a UDF surfaces as
    # "Transform function failed" for every op container shape.
    def op(x):
        if np.sum(np.sum(x)) < 10:
            raise ValueError
        return x

    frame = DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]})
    msg = "Transform function failed"

    for spec in ([op], {"A": op, "B": op}, {"A": [op], "B": [op]}, {"A": [op, "shift"], "B": [op]}):
        with pytest.raises(ValueError, match=msg):
            frame.transform(spec)
|
236 |
+
|
237 |
+
|
238 |
+
@pytest.mark.parametrize("use_apply", [True, False])
|
239 |
+
def test_transform_passes_args(use_apply, frame_or_series):
|
240 |
+
# GH 35964
|
241 |
+
# transform uses UDF either via apply or passing the entire DataFrame
|
242 |
+
expected_args = [1, 2]
|
243 |
+
expected_kwargs = {"c": 3}
|
244 |
+
|
245 |
+
def f(x, a, b, c):
|
246 |
+
# transform is using apply iff x is not a DataFrame
|
247 |
+
if use_apply == isinstance(x, frame_or_series):
|
248 |
+
# Force transform to fallback
|
249 |
+
raise ValueError
|
250 |
+
assert [a, b] == expected_args
|
251 |
+
assert c == expected_kwargs["c"]
|
252 |
+
return x
|
253 |
+
|
254 |
+
frame_or_series([1]).transform(f, 0, *expected_args, **expected_kwargs)
|
255 |
+
|
256 |
+
|
257 |
+
def test_transform_empty_dataframe():
    # GH 39636: transforming an empty frame/column is a no-op.
    frame = DataFrame([], columns=["col1", "col2"])
    tm.assert_frame_equal(frame.transform(lambda x: x + 10), frame)

    tm.assert_series_equal(frame["col1"].transform(lambda x: x + 10), frame["col1"])
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_invalid_arg.py
ADDED
@@ -0,0 +1,361 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Tests specifically aimed at detecting bad arguments.
|
2 |
+
# This file is organized by reason for exception.
|
3 |
+
# 1. always invalid argument values
|
4 |
+
# 2. missing column(s)
|
5 |
+
# 3. incompatible ops/dtype/args/kwargs
|
6 |
+
# 4. invalid result shape/type
|
7 |
+
# If your test does not fit into one of these categories, add to this list.
|
8 |
+
|
9 |
+
from itertools import chain
|
10 |
+
import re
|
11 |
+
|
12 |
+
import numpy as np
|
13 |
+
import pytest
|
14 |
+
|
15 |
+
from pandas.errors import SpecificationError
|
16 |
+
|
17 |
+
from pandas import (
|
18 |
+
DataFrame,
|
19 |
+
Series,
|
20 |
+
date_range,
|
21 |
+
)
|
22 |
+
import pandas._testing as tm
|
23 |
+
|
24 |
+
|
25 |
+
@pytest.mark.parametrize("result_type", ["foo", 1])
def test_result_type_error(result_type):
    # Only None/'reduce'/'broadcast'/'expand' are valid result_type values.
    frame = DataFrame(
        np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
        columns=["A", "B", "C"],
    )

    msg = (
        "invalid value for result_type, must be one of "
        "{None, 'reduce', 'broadcast', 'expand'}"
    )
    with pytest.raises(ValueError, match=msg):
        frame.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type)
|
39 |
+
|
40 |
+
|
41 |
+
def test_apply_invalid_axis_value():
    # An out-of-range axis number is rejected with a clear message.
    frame = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
    with pytest.raises(ValueError, match="No axis named 2 for object type DataFrame"):
        frame.apply(lambda x: x, 2)
|
46 |
+
|
47 |
+
|
48 |
+
def test_agg_raises():
    # GH 26513: calling agg with no function at all raises.
    frame = DataFrame({"A": [0, 1], "B": [1, 2]})
    with pytest.raises(TypeError, match="Must provide"):
        frame.agg()
|
55 |
+
|
56 |
+
|
57 |
+
def test_map_with_invalid_na_action_raises():
    # GH 32815: na_action accepts only 'ignore' or None.
    ser = Series([1, 2, 3])
    msg = "na_action must either be 'ignore' or None"
    with pytest.raises(ValueError, match=msg):
        ser.map(lambda x: x, na_action="____")
|
63 |
+
|
64 |
+
|
65 |
+
@pytest.mark.parametrize("input_na_action", ["____", True])
def test_map_arg_is_dict_with_invalid_na_action_raises(input_na_action):
    # GH 46588: invalid na_action also raises when mapping with a dict.
    ser = Series([1, 2, 3])
    msg = f"na_action must either be 'ignore' or None, {input_na_action} was passed"
    with pytest.raises(ValueError, match=msg):
        ser.map({1: 2}, na_action=input_na_action)
|
72 |
+
|
73 |
+
|
74 |
+
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
|
75 |
+
@pytest.mark.parametrize("func", [{"A": {"B": "sum"}}, {"A": {"B": ["sum"]}}])
|
76 |
+
def test_nested_renamer(frame_or_series, method, func):
|
77 |
+
# GH 35964
|
78 |
+
obj = frame_or_series({"A": [1]})
|
79 |
+
match = "nested renamer is not supported"
|
80 |
+
with pytest.raises(SpecificationError, match=match):
|
81 |
+
getattr(obj, method)(func)
|
82 |
+
|
83 |
+
|
84 |
+
@pytest.mark.parametrize(
    "renamer",
    [{"foo": ["min", "max"]}, {"foo": ["min", "max"], "bar": ["sum", "mean"]}],
)
def test_series_nested_renamer(renamer):
    # Nested renaming through Series.agg is likewise rejected.
    ser = Series(range(6), dtype="int64", name="series")
    with pytest.raises(SpecificationError, match="nested renamer is not supported"):
        ser.agg(renamer)
|
93 |
+
|
94 |
+
|
95 |
+
def test_apply_dict_depr():
    # A dict-of-list renamer on a Series column raises SpecificationError.
    tsdf = DataFrame(
        np.random.default_rng(2).standard_normal((10, 3)),
        columns=["A", "B", "C"],
        index=date_range("1/1/2000", periods=10),
    )
    with pytest.raises(SpecificationError, match="nested renamer is not supported"):
        tsdf.A.agg({"foo": ["sum", "mean"]})
|
104 |
+
|
105 |
+
|
106 |
+
@pytest.mark.parametrize("method", ["agg", "transform"])
def test_dict_nested_renaming_depr(method):
    # Nested renaming via dict-of-dict is rejected for agg and transform.
    frame = DataFrame({"A": range(5), "B": 5})

    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        getattr(frame, method)({"A": {"foo": "min"}, "B": {"bar": "max"}})
|
114 |
+
|
115 |
+
|
116 |
+
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}])
def test_missing_column(method, func):
    # GH 40004: referencing a non-existent column raises KeyError.
    obj = DataFrame({"A": [1]})
    with pytest.raises(KeyError, match=re.escape("Column(s) ['B'] do not exist")):
        getattr(obj, method)(func)
|
124 |
+
|
125 |
+
|
126 |
+
def test_transform_mixed_column_name_dtypes():
    # GH 39025: missing columns of mixed name dtypes are all reported.
    frame = DataFrame({"a": ["1"]})
    msg = r"Column\(s\) \[1, 'b'\] do not exist"
    with pytest.raises(KeyError, match=msg):
        frame.transform({"a": int, 1: str, "b": int})
|
132 |
+
|
133 |
+
|
134 |
+
@pytest.mark.parametrize(
    "how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)]
)
def test_apply_str_axis_1_raises(how, args):
    # GH 39211: some string ops do not support axis=1.
    frame = DataFrame({"a": [1, 2], "b": [3, 4]})
    with pytest.raises(ValueError, match=f"Operation {how} does not support axis=1"):
        frame.apply(how, axis=1, args=args)
|
143 |
+
|
144 |
+
|
145 |
+
def test_transform_axis_1_raises():
    # GH 35964: a Series has no axis 1.
    with pytest.raises(ValueError, match="No axis named 1 for object type Series"):
        Series([1]).transform("sum", axis=1)
|
150 |
+
|
151 |
+
|
152 |
+
def test_apply_modify_traceback():
    # A UDF that trips over a NaN (float) in a string column surfaces the
    # underlying AttributeError rather than masking it.
    data = DataFrame(
        {
            "A": "foo foo foo foo bar bar bar bar foo foo foo".split(),
            "B": "one one one two one one one two two two one".split(),
            "C": "dull dull shiny dull dull shiny shiny dull shiny shiny shiny".split(),
            "D": np.random.default_rng(2).standard_normal(11),
            "E": np.random.default_rng(2).standard_normal(11),
            "F": np.random.default_rng(2).standard_normal(11),
        }
    )

    # Inject a NaN so row 4 carries a float where a string is expected.
    data.loc[4, "C"] = np.nan

    def transform(row):
        if row["C"].startswith("shin") and row["A"] == "foo":
            row["D"] = 7
        return row

    msg = "'float' object has no attribute 'startswith'"
    with pytest.raises(AttributeError, match=msg):
        data.apply(transform, axis=1)
|
210 |
+
|
211 |
+
|
212 |
+
@pytest.mark.parametrize(
    "df, func, expected",
    tm.get_cython_table_params(
        DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]]
    ),
)
def test_agg_cython_table_raises_frame(df, func, expected, axis, using_infer_string):
    # GH 21224: cumprod on strings raises.
    if using_infer_string:
        import pyarrow as pa

        # pyarrow-backed strings raise their own not-implemented error.
        expected = (expected, pa.lib.ArrowNotImplementedError)

    msg = "can't multiply sequence by non-int of type 'str'|has no kernel"
    warn = None if isinstance(func, str) else FutureWarning
    with pytest.raises(expected, match=msg):
        with tm.assert_produces_warning(warn, match="using DataFrame.cumprod"):
            df.agg(func, axis=axis)
|
230 |
+
|
231 |
+
|
232 |
+
@pytest.mark.parametrize(
    "series, func, expected",
    chain(
        tm.get_cython_table_params(
            Series("a b c".split()),
            [
                ("mean", TypeError),  # mean raises TypeError
                ("prod", TypeError),
                ("std", TypeError),
                ("var", TypeError),
                ("median", TypeError),
                ("cumprod", TypeError),
            ],
        )
    ),
)
def test_agg_cython_table_raises_series(series, func, expected, using_infer_string):
    # GH 21224: numeric reductions on a string Series raise.
    msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"
    if func == "median" or func is np.nanmedian or func is np.median:
        msg = r"Cannot convert \['a' 'b' 'c'\] to numeric"

    if using_infer_string:
        import pyarrow as pa

        expected = (expected, pa.lib.ArrowNotImplementedError)

    msg = msg + "|does not support|has no kernel"
    warn = None if isinstance(func, str) else FutureWarning

    with pytest.raises(expected, match=msg):
        # e.g. Series('a b'.split()).cumprod() will raise
        with tm.assert_produces_warning(warn, match="is currently using Series.*"):
            series.agg(func)
|
266 |
+
|
267 |
+
|
268 |
+
def test_agg_none_to_type():
    # GH 40543: converting a None cell to int raises a TypeError via agg.
    frame = DataFrame({"a": [None]})
    with pytest.raises(TypeError, match=re.escape("int() argument must be a string")):
        frame.agg({"a": lambda x: int(x.iloc[0])})
|
274 |
+
|
275 |
+
|
276 |
+
def test_transform_none_to_type():
    # GH 34377: converting a None cell to int raises a TypeError via transform.
    frame = DataFrame({"a": [None]})
    with pytest.raises(TypeError, match="argument must be a"):
        frame.transform({"a": lambda x: int(x.iloc[0])})
|
282 |
+
|
283 |
+
|
284 |
+
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.array([1, 2]).reshape(-1, 2),
        lambda x: [1, 2],
        lambda x: Series([1, 2]),
    ],
)
def test_apply_broadcast_error(func):
    # Results that cannot be broadcast back (wrong ndim/length) raise.
    frame = DataFrame(
        np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
        columns=["A", "B", "C"],
    )

    msg = "too many dims to broadcast|cannot broadcast result"
    with pytest.raises(ValueError, match=msg):
        frame.apply(func, axis=1, result_type="broadcast")
|
302 |
+
|
303 |
+
|
304 |
+
def test_transform_and_agg_err_agg(axis, float_frame):
    # Mixing a transform ("sqrt") with an aggregation ("max") in agg raises.
    msg = "cannot combine transform and aggregation operations"
    with pytest.raises(ValueError, match=msg):
        with np.errstate(all="ignore"):
            float_frame.agg(["max", "sqrt"], axis=axis)
|
310 |
+
|
311 |
+
|
312 |
+
@pytest.mark.filterwarnings("ignore::FutureWarning")  # GH53325
@pytest.mark.parametrize(
    "func, msg",
    [
        (["sqrt", "max"], "cannot combine transform and aggregation"),
        (
            {"foo": np.sqrt, "bar": "sum"},
            "cannot perform both aggregation and transformation",
        ),
    ],
)
def test_transform_and_agg_err_series(string_series, func, msg):
    # Mixing a transform with an aggregator in Series.agg raises.
    with pytest.raises(ValueError, match=msg):
        with np.errstate(all="ignore"):
            string_series.agg(func)
|
328 |
+
|
329 |
+
|
330 |
+
@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]])
def test_transform_wont_agg_frame(axis, float_frame, func):
    # GH 35964: transform refuses aggregating functions.
    with pytest.raises(ValueError, match="Function did not transform"):
        float_frame.transform(func, axis=axis)
|
337 |
+
|
338 |
+
|
339 |
+
@pytest.mark.parametrize("func", [["min", "max"], ["sqrt", "max"]])
def test_transform_wont_agg_series(string_series, func):
    # GH 35964: Series.transform refuses aggregating functions.
    with pytest.raises(ValueError, match="Function did not transform"):
        string_series.transform(func)
|
347 |
+
|
348 |
+
|
349 |
+
@pytest.mark.parametrize(
    "op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}]
)
def test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper):
    # GH 35964: any reduction, in any container shape, fails in transform.
    op = op_wrapper(all_reductions)

    obj = tm.get_obj(DataFrame({"A": [1, 2, 3]}), frame_or_series)

    with pytest.raises(ValueError, match="Function did not transform"):
        obj.transform(op)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_numba.py
ADDED
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
import pandas.util._test_decorators as td
|
5 |
+
|
6 |
+
from pandas import (
|
7 |
+
DataFrame,
|
8 |
+
Index,
|
9 |
+
)
|
10 |
+
import pandas._testing as tm
|
11 |
+
|
12 |
+
# Every test in this module requires numba and should not share a CPU.
pytestmark = [td.skip_if_no("numba"), pytest.mark.single_cpu]


@pytest.fixture(params=[0, 1])
def apply_axis(request):
    # Run each test once per DataFrame.apply axis (0 and 1).
    return request.param
|
18 |
+
|
19 |
+
|
20 |
+
def test_numba_vs_python_noop(float_frame, apply_axis):
    # An identity UDF must give the same result under both engines.
    def identity(x):
        return x

    result = float_frame.apply(identity, engine="numba", axis=apply_axis)
    expected = float_frame.apply(identity, engine="python", axis=apply_axis)
    tm.assert_frame_equal(result, expected)
|
25 |
+
|
26 |
+
|
27 |
+
def test_numba_vs_python_string_index():
    # GH 56189: pyarrow-backed string indexes work with the numba engine.
    pytest.importorskip("pyarrow")
    frame = DataFrame(
        1,
        index=Index(["a", "b"], dtype="string[pyarrow_numpy]"),
        columns=Index(["x", "y"], dtype="string[pyarrow_numpy]"),
    )

    def identity(x):
        return x

    result = frame.apply(identity, engine="numba", axis=0)
    expected = frame.apply(identity, engine="python", axis=0)
    tm.assert_frame_equal(
        result, expected, check_column_type=False, check_index_type=False
    )
|
41 |
+
|
42 |
+
|
43 |
+
def test_numba_vs_python_indexing():
    # Label-based indexing inside the UDF matches between engines, both ways.
    frame = DataFrame(
        {"a": [1, 2, 3], "b": [4, 5, 6], "c": [7.0, 8.0, 9.0]},
        index=Index(["A", "B", "C"]),
    )

    # axis=1: index into the row by column label
    row_func = lambda x: x["c"]
    tm.assert_series_equal(
        frame.apply(row_func, engine="numba", axis=1),
        frame.apply(row_func, engine="python", axis=1),
    )

    # axis=0: index into the column by row label
    col_func = lambda x: x["A"]
    tm.assert_series_equal(
        frame.apply(col_func, engine="numba", axis=0),
        frame.apply(col_func, engine="python", axis=0),
    )
|
57 |
+
|
58 |
+
|
59 |
+
@pytest.mark.parametrize(
    "reduction",
    [lambda x: x.mean(), lambda x: x.min(), lambda x: x.max(), lambda x: x.sum()],
)
def test_numba_vs_python_reductions(reduction, apply_axis):
    # Row/column reductions agree between the numba and python engines.
    frame = DataFrame(np.ones((4, 4), dtype=np.float64))
    tm.assert_series_equal(
        frame.apply(reduction, engine="numba", axis=apply_axis),
        frame.apply(reduction, engine="python", axis=apply_axis),
    )
|
68 |
+
|
69 |
+
|
70 |
+
@pytest.mark.parametrize("colnames", [[1, 2, 3], [1.0, 2.0, 3.0]])
def test_numba_numeric_colnames(colnames):
    # Numeric column names lower properly and can be indexed on.
    frame = DataFrame(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64), columns=colnames
    )
    first_col = colnames[0]
    get_first = lambda x: x[first_col]  # select the first column of each row
    tm.assert_series_equal(
        frame.apply(get_first, engine="numba", axis=1),
        frame.apply(get_first, engine="python", axis=1),
    )
|
81 |
+
|
82 |
+
|
83 |
+
def test_numba_parallel_unsupported(float_frame):
    # parallel=True is rejected for raw=False numba apply.
    identity = lambda x: x
    with pytest.raises(
        NotImplementedError,
        match="Parallel apply is not supported when raw=False and engine='numba'",
    ):
        float_frame.apply(identity, engine="numba", engine_kwargs={"parallel": True})
|
90 |
+
|
91 |
+
|
92 |
+
def test_numba_nonunique_unsupported(apply_axis):
    # Duplicate labels are rejected for raw=False numba apply.
    identity = lambda x: x
    frame = DataFrame({"a": [1, 2]}, index=Index(["a", "a"]))
    with pytest.raises(
        NotImplementedError,
        match="The index/columns must be unique when raw=False and engine='numba'",
    ):
        frame.apply(identity, engine="numba", axis=apply_axis)
|
100 |
+
|
101 |
+
|
102 |
+
def test_numba_unsupported_dtypes(apply_axis):
    """Non-numeric and extension-array columns are rejected by the numba engine."""

    def identity(x):
        return x

    df = DataFrame({"a": [1, 2], "b": ["a", "b"], "c": [4, 5]})
    df["c"] = df["c"].astype("double[pyarrow]")

    non_numeric_msg = "Column b must have a numeric dtype. Found 'object|string' instead"
    with pytest.raises(ValueError, match=non_numeric_msg):
        df.apply(identity, engine="numba", axis=apply_axis)

    extension_msg = (
        "Column c is backed by an extension array, "
        "which is not supported by the numba engine."
    )
    with pytest.raises(ValueError, match=extension_msg):
        df["c"].to_frame().apply(identity, engine="numba", axis=apply_axis)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply.py
ADDED
@@ -0,0 +1,701 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
from pandas import (
|
6 |
+
DataFrame,
|
7 |
+
Index,
|
8 |
+
MultiIndex,
|
9 |
+
Series,
|
10 |
+
concat,
|
11 |
+
date_range,
|
12 |
+
timedelta_range,
|
13 |
+
)
|
14 |
+
import pandas._testing as tm
|
15 |
+
from pandas.tests.apply.common import series_transform_kernels
|
16 |
+
|
17 |
+
|
18 |
+
@pytest.fixture(params=[False, "compat"])
def by_row(request):
    """Parametrized fixture supplying each supported ``by_row`` mode."""
    return request.param
|
21 |
+
|
22 |
+
|
23 |
+
def test_series_map_box_timedelta(by_row):
|
24 |
+
# GH#11349
|
25 |
+
ser = Series(timedelta_range("1 day 1 s", periods=3, freq="h"))
|
26 |
+
|
27 |
+
def f(x):
|
28 |
+
return x.total_seconds() if by_row else x.dt.total_seconds()
|
29 |
+
|
30 |
+
result = ser.apply(f, by_row=by_row)
|
31 |
+
|
32 |
+
expected = ser.map(lambda x: x.total_seconds())
|
33 |
+
tm.assert_series_equal(result, expected)
|
34 |
+
|
35 |
+
expected = Series([86401.0, 90001.0, 93601.0])
|
36 |
+
tm.assert_series_equal(result, expected)
|
37 |
+
|
38 |
+
|
39 |
+
def test_apply(datetime_series, by_row):
|
40 |
+
result = datetime_series.apply(np.sqrt, by_row=by_row)
|
41 |
+
with np.errstate(all="ignore"):
|
42 |
+
expected = np.sqrt(datetime_series)
|
43 |
+
tm.assert_series_equal(result, expected)
|
44 |
+
|
45 |
+
# element-wise apply (ufunc)
|
46 |
+
result = datetime_series.apply(np.exp, by_row=by_row)
|
47 |
+
expected = np.exp(datetime_series)
|
48 |
+
tm.assert_series_equal(result, expected)
|
49 |
+
|
50 |
+
# empty series
|
51 |
+
s = Series(dtype=object, name="foo", index=Index([], name="bar"))
|
52 |
+
rs = s.apply(lambda x: x, by_row=by_row)
|
53 |
+
tm.assert_series_equal(s, rs)
|
54 |
+
|
55 |
+
# check all metadata (GH 9322)
|
56 |
+
assert s is not rs
|
57 |
+
assert s.index is rs.index
|
58 |
+
assert s.dtype == rs.dtype
|
59 |
+
assert s.name == rs.name
|
60 |
+
|
61 |
+
# index but no data
|
62 |
+
s = Series(index=[1, 2, 3], dtype=np.float64)
|
63 |
+
rs = s.apply(lambda x: x, by_row=by_row)
|
64 |
+
tm.assert_series_equal(s, rs)
|
65 |
+
|
66 |
+
|
67 |
+
def test_apply_map_same_length_inference_bug():
|
68 |
+
s = Series([1, 2])
|
69 |
+
|
70 |
+
def f(x):
|
71 |
+
return (x, x + 1)
|
72 |
+
|
73 |
+
result = s.apply(f, by_row="compat")
|
74 |
+
expected = s.map(f)
|
75 |
+
tm.assert_series_equal(result, expected)
|
76 |
+
|
77 |
+
|
78 |
+
@pytest.mark.parametrize("convert_dtype", [True, False])
|
79 |
+
def test_apply_convert_dtype_deprecated(convert_dtype):
|
80 |
+
ser = Series(np.random.default_rng(2).standard_normal(10))
|
81 |
+
|
82 |
+
def func(x):
|
83 |
+
return x if x > 0 else np.nan
|
84 |
+
|
85 |
+
with tm.assert_produces_warning(FutureWarning):
|
86 |
+
ser.apply(func, convert_dtype=convert_dtype, by_row="compat")
|
87 |
+
|
88 |
+
|
89 |
+
def test_apply_args():
    """Positional ``args`` are forwarded to the applied callable."""
    ser = Series(["foo,bar"])
    result = ser.apply(str.split, args=(",",))
    first = result[0]
    assert isinstance(first, list)
    assert first == ["foo", "bar"]
|
95 |
+
|
96 |
+
|
97 |
+
@pytest.mark.parametrize(
|
98 |
+
"args, kwargs, increment",
|
99 |
+
[((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)],
|
100 |
+
)
|
101 |
+
def test_agg_args(args, kwargs, increment):
|
102 |
+
# GH 43357
|
103 |
+
def f(x, a=0, b=0, c=0):
|
104 |
+
return x + a + 10 * b + 100 * c
|
105 |
+
|
106 |
+
s = Series([1, 2])
|
107 |
+
msg = (
|
108 |
+
"in Series.agg cannot aggregate and has been deprecated. "
|
109 |
+
"Use Series.transform to keep behavior unchanged."
|
110 |
+
)
|
111 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
112 |
+
result = s.agg(f, 0, *args, **kwargs)
|
113 |
+
expected = s + increment
|
114 |
+
tm.assert_series_equal(result, expected)
|
115 |
+
|
116 |
+
|
117 |
+
def test_agg_mapping_func_deprecated():
|
118 |
+
# GH 53325
|
119 |
+
s = Series([1, 2, 3])
|
120 |
+
|
121 |
+
def foo1(x, a=1, c=0):
|
122 |
+
return x + a + c
|
123 |
+
|
124 |
+
def foo2(x, b=2, c=0):
|
125 |
+
return x + b + c
|
126 |
+
|
127 |
+
msg = "using .+ in Series.agg cannot aggregate and"
|
128 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
129 |
+
s.agg(foo1, 0, 3, c=4)
|
130 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
131 |
+
s.agg([foo1, foo2], 0, 3, c=4)
|
132 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
133 |
+
s.agg({"a": foo1, "b": foo2}, 0, 3, c=4)
|
134 |
+
|
135 |
+
|
136 |
+
def test_series_apply_map_box_timestamps(by_row):
|
137 |
+
# GH#2689, GH#2627
|
138 |
+
ser = Series(date_range("1/1/2000", periods=10))
|
139 |
+
|
140 |
+
def func(x):
|
141 |
+
return (x.hour, x.day, x.month)
|
142 |
+
|
143 |
+
if not by_row:
|
144 |
+
msg = "Series' object has no attribute 'hour'"
|
145 |
+
with pytest.raises(AttributeError, match=msg):
|
146 |
+
ser.apply(func, by_row=by_row)
|
147 |
+
return
|
148 |
+
|
149 |
+
result = ser.apply(func, by_row=by_row)
|
150 |
+
expected = ser.map(func)
|
151 |
+
tm.assert_series_equal(result, expected)
|
152 |
+
|
153 |
+
|
154 |
+
def test_apply_box_dt64():
|
155 |
+
# ufunc will not be boxed. Same test cases as the test_map_box
|
156 |
+
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
|
157 |
+
ser = Series(vals, dtype="M8[ns]")
|
158 |
+
assert ser.dtype == "datetime64[ns]"
|
159 |
+
# boxed value must be Timestamp instance
|
160 |
+
res = ser.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat")
|
161 |
+
exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
|
162 |
+
tm.assert_series_equal(res, exp)
|
163 |
+
|
164 |
+
|
165 |
+
def test_apply_box_dt64tz():
|
166 |
+
vals = [
|
167 |
+
pd.Timestamp("2011-01-01", tz="US/Eastern"),
|
168 |
+
pd.Timestamp("2011-01-02", tz="US/Eastern"),
|
169 |
+
]
|
170 |
+
ser = Series(vals, dtype="M8[ns, US/Eastern]")
|
171 |
+
assert ser.dtype == "datetime64[ns, US/Eastern]"
|
172 |
+
res = ser.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat")
|
173 |
+
exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
|
174 |
+
tm.assert_series_equal(res, exp)
|
175 |
+
|
176 |
+
|
177 |
+
def test_apply_box_td64():
|
178 |
+
# timedelta
|
179 |
+
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
|
180 |
+
ser = Series(vals)
|
181 |
+
assert ser.dtype == "timedelta64[ns]"
|
182 |
+
res = ser.apply(lambda x: f"{type(x).__name__}_{x.days}", by_row="compat")
|
183 |
+
exp = Series(["Timedelta_1", "Timedelta_2"])
|
184 |
+
tm.assert_series_equal(res, exp)
|
185 |
+
|
186 |
+
|
187 |
+
def test_apply_box_period():
|
188 |
+
# period
|
189 |
+
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
|
190 |
+
ser = Series(vals)
|
191 |
+
assert ser.dtype == "Period[M]"
|
192 |
+
res = ser.apply(lambda x: f"{type(x).__name__}_{x.freqstr}", by_row="compat")
|
193 |
+
exp = Series(["Period_M", "Period_M"])
|
194 |
+
tm.assert_series_equal(res, exp)
|
195 |
+
|
196 |
+
|
197 |
+
def test_apply_datetimetz(by_row):
|
198 |
+
values = date_range("2011-01-01", "2011-01-02", freq="h").tz_localize("Asia/Tokyo")
|
199 |
+
s = Series(values, name="XX")
|
200 |
+
|
201 |
+
result = s.apply(lambda x: x + pd.offsets.Day(), by_row=by_row)
|
202 |
+
exp_values = date_range("2011-01-02", "2011-01-03", freq="h").tz_localize(
|
203 |
+
"Asia/Tokyo"
|
204 |
+
)
|
205 |
+
exp = Series(exp_values, name="XX")
|
206 |
+
tm.assert_series_equal(result, exp)
|
207 |
+
|
208 |
+
result = s.apply(lambda x: x.hour if by_row else x.dt.hour, by_row=by_row)
|
209 |
+
exp = Series(list(range(24)) + [0], name="XX", dtype="int64" if by_row else "int32")
|
210 |
+
tm.assert_series_equal(result, exp)
|
211 |
+
|
212 |
+
# not vectorized
|
213 |
+
def f(x):
|
214 |
+
return str(x.tz) if by_row else str(x.dt.tz)
|
215 |
+
|
216 |
+
result = s.apply(f, by_row=by_row)
|
217 |
+
if by_row:
|
218 |
+
exp = Series(["Asia/Tokyo"] * 25, name="XX")
|
219 |
+
tm.assert_series_equal(result, exp)
|
220 |
+
else:
|
221 |
+
assert result == "Asia/Tokyo"
|
222 |
+
|
223 |
+
|
224 |
+
def test_apply_categorical(by_row, using_infer_string):
|
225 |
+
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
|
226 |
+
ser = Series(values, name="XX", index=list("abcdefg"))
|
227 |
+
|
228 |
+
if not by_row:
|
229 |
+
msg = "Series' object has no attribute 'lower"
|
230 |
+
with pytest.raises(AttributeError, match=msg):
|
231 |
+
ser.apply(lambda x: x.lower(), by_row=by_row)
|
232 |
+
assert ser.apply(lambda x: "A", by_row=by_row) == "A"
|
233 |
+
return
|
234 |
+
|
235 |
+
result = ser.apply(lambda x: x.lower(), by_row=by_row)
|
236 |
+
|
237 |
+
# should be categorical dtype when the number of categories are
|
238 |
+
# the same
|
239 |
+
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
|
240 |
+
exp = Series(values, name="XX", index=list("abcdefg"))
|
241 |
+
tm.assert_series_equal(result, exp)
|
242 |
+
tm.assert_categorical_equal(result.values, exp.values)
|
243 |
+
|
244 |
+
result = ser.apply(lambda x: "A")
|
245 |
+
exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
|
246 |
+
tm.assert_series_equal(result, exp)
|
247 |
+
assert result.dtype == object if not using_infer_string else "string[pyarrow_numpy]"
|
248 |
+
|
249 |
+
|
250 |
+
@pytest.mark.parametrize("series", [["1-1", "1-1", np.nan], ["1-1", "1-2", np.nan]])
|
251 |
+
def test_apply_categorical_with_nan_values(series, by_row):
|
252 |
+
# GH 20714 bug fixed in: GH 24275
|
253 |
+
s = Series(series, dtype="category")
|
254 |
+
if not by_row:
|
255 |
+
msg = "'Series' object has no attribute 'split'"
|
256 |
+
with pytest.raises(AttributeError, match=msg):
|
257 |
+
s.apply(lambda x: x.split("-")[0], by_row=by_row)
|
258 |
+
return
|
259 |
+
|
260 |
+
result = s.apply(lambda x: x.split("-")[0], by_row=by_row)
|
261 |
+
result = result.astype(object)
|
262 |
+
expected = Series(["1", "1", np.nan], dtype="category")
|
263 |
+
expected = expected.astype(object)
|
264 |
+
tm.assert_series_equal(result, expected)
|
265 |
+
|
266 |
+
|
267 |
+
def test_apply_empty_integer_series_with_datetime_index(by_row):
|
268 |
+
# GH 21245
|
269 |
+
s = Series([], index=date_range(start="2018-01-01", periods=0), dtype=int)
|
270 |
+
result = s.apply(lambda x: x, by_row=by_row)
|
271 |
+
tm.assert_series_equal(result, s)
|
272 |
+
|
273 |
+
|
274 |
+
def test_apply_dataframe_iloc():
    """A uint64 column looked up positionally via apply keeps its dtype."""
    numbers = DataFrame(np.uint64([1, 2, 3, 4, 5]), columns=["Numbers"])
    indices = DataFrame([2, 3, 2, 1, 2], columns=["Indices"])

    def retrieve(pos, source):
        # Positional lookup into the uint64 column.
        return source["Numbers"].iloc[pos]

    result = indices["Indices"].apply(retrieve, args=(numbers,))
    expected = Series([3, 4, 3, 2, 3], name="Indices", dtype="uint64")
    tm.assert_series_equal(result, expected)
|
285 |
+
|
286 |
+
|
287 |
+
def test_transform(string_series, by_row):
|
288 |
+
# transforming functions
|
289 |
+
|
290 |
+
with np.errstate(all="ignore"):
|
291 |
+
f_sqrt = np.sqrt(string_series)
|
292 |
+
f_abs = np.abs(string_series)
|
293 |
+
|
294 |
+
# ufunc
|
295 |
+
result = string_series.apply(np.sqrt, by_row=by_row)
|
296 |
+
expected = f_sqrt.copy()
|
297 |
+
tm.assert_series_equal(result, expected)
|
298 |
+
|
299 |
+
# list-like
|
300 |
+
result = string_series.apply([np.sqrt], by_row=by_row)
|
301 |
+
expected = f_sqrt.to_frame().copy()
|
302 |
+
expected.columns = ["sqrt"]
|
303 |
+
tm.assert_frame_equal(result, expected)
|
304 |
+
|
305 |
+
result = string_series.apply(["sqrt"], by_row=by_row)
|
306 |
+
tm.assert_frame_equal(result, expected)
|
307 |
+
|
308 |
+
# multiple items in list
|
309 |
+
# these are in the order as if we are applying both functions per
|
310 |
+
# series and then concatting
|
311 |
+
expected = concat([f_sqrt, f_abs], axis=1)
|
312 |
+
expected.columns = ["sqrt", "absolute"]
|
313 |
+
result = string_series.apply([np.sqrt, np.abs], by_row=by_row)
|
314 |
+
tm.assert_frame_equal(result, expected)
|
315 |
+
|
316 |
+
# dict, provide renaming
|
317 |
+
expected = concat([f_sqrt, f_abs], axis=1)
|
318 |
+
expected.columns = ["foo", "bar"]
|
319 |
+
expected = expected.unstack().rename("series")
|
320 |
+
|
321 |
+
result = string_series.apply({"foo": np.sqrt, "bar": np.abs}, by_row=by_row)
|
322 |
+
tm.assert_series_equal(result.reindex_like(expected), expected)
|
323 |
+
|
324 |
+
|
325 |
+
@pytest.mark.parametrize("op", series_transform_kernels)
|
326 |
+
def test_transform_partial_failure(op, request):
|
327 |
+
# GH 35964
|
328 |
+
if op in ("ffill", "bfill", "pad", "backfill", "shift"):
|
329 |
+
request.applymarker(
|
330 |
+
pytest.mark.xfail(reason=f"{op} is successful on any dtype")
|
331 |
+
)
|
332 |
+
|
333 |
+
# Using object makes most transform kernels fail
|
334 |
+
ser = Series(3 * [object])
|
335 |
+
|
336 |
+
if op in ("fillna", "ngroup"):
|
337 |
+
error = ValueError
|
338 |
+
msg = "Transform function failed"
|
339 |
+
else:
|
340 |
+
error = TypeError
|
341 |
+
msg = "|".join(
|
342 |
+
[
|
343 |
+
"not supported between instances of 'type' and 'type'",
|
344 |
+
"unsupported operand type",
|
345 |
+
]
|
346 |
+
)
|
347 |
+
|
348 |
+
with pytest.raises(error, match=msg):
|
349 |
+
ser.transform([op, "shift"])
|
350 |
+
|
351 |
+
with pytest.raises(error, match=msg):
|
352 |
+
ser.transform({"A": op, "B": "shift"})
|
353 |
+
|
354 |
+
with pytest.raises(error, match=msg):
|
355 |
+
ser.transform({"A": [op], "B": ["shift"]})
|
356 |
+
|
357 |
+
with pytest.raises(error, match=msg):
|
358 |
+
ser.transform({"A": [op, "shift"], "B": [op]})
|
359 |
+
|
360 |
+
|
361 |
+
def test_transform_partial_failure_valueerror():
|
362 |
+
# GH 40211
|
363 |
+
def noop(x):
|
364 |
+
return x
|
365 |
+
|
366 |
+
def raising_op(_):
|
367 |
+
raise ValueError
|
368 |
+
|
369 |
+
ser = Series(3 * [object])
|
370 |
+
msg = "Transform function failed"
|
371 |
+
|
372 |
+
with pytest.raises(ValueError, match=msg):
|
373 |
+
ser.transform([noop, raising_op])
|
374 |
+
|
375 |
+
with pytest.raises(ValueError, match=msg):
|
376 |
+
ser.transform({"A": raising_op, "B": noop})
|
377 |
+
|
378 |
+
with pytest.raises(ValueError, match=msg):
|
379 |
+
ser.transform({"A": [raising_op], "B": [noop]})
|
380 |
+
|
381 |
+
with pytest.raises(ValueError, match=msg):
|
382 |
+
ser.transform({"A": [noop, raising_op], "B": [noop]})
|
383 |
+
|
384 |
+
|
385 |
+
def test_demo():
    """Demonstration of list- and dict-based Series.agg."""
    ser = Series(range(6), dtype="int64", name="series")

    tm.assert_series_equal(
        ser.agg(["min", "max"]),
        Series([0, 5], index=["min", "max"], name="series"),
    )
    tm.assert_series_equal(
        ser.agg({"foo": "min"}),
        Series([0], index=["foo"], name="series"),
    )
|
396 |
+
|
397 |
+
|
398 |
+
@pytest.mark.parametrize("func", [str, lambda x: str(x)])
|
399 |
+
def test_apply_map_evaluate_lambdas_the_same(string_series, func, by_row):
|
400 |
+
# test that we are evaluating row-by-row first if by_row="compat"
|
401 |
+
# else vectorized evaluation
|
402 |
+
result = string_series.apply(func, by_row=by_row)
|
403 |
+
|
404 |
+
if by_row:
|
405 |
+
expected = string_series.map(func)
|
406 |
+
tm.assert_series_equal(result, expected)
|
407 |
+
else:
|
408 |
+
assert result == str(string_series)
|
409 |
+
|
410 |
+
|
411 |
+
def test_agg_evaluate_lambdas(string_series):
|
412 |
+
# GH53325
|
413 |
+
# in the future, the result will be a Series class.
|
414 |
+
|
415 |
+
with tm.assert_produces_warning(FutureWarning):
|
416 |
+
result = string_series.agg(lambda x: type(x))
|
417 |
+
assert isinstance(result, Series) and len(result) == len(string_series)
|
418 |
+
|
419 |
+
with tm.assert_produces_warning(FutureWarning):
|
420 |
+
result = string_series.agg(type)
|
421 |
+
assert isinstance(result, Series) and len(result) == len(string_series)
|
422 |
+
|
423 |
+
|
424 |
+
@pytest.mark.parametrize("op_name", ["agg", "apply"])
|
425 |
+
def test_with_nested_series(datetime_series, op_name):
|
426 |
+
# GH 2316
|
427 |
+
# .agg with a reducer and a transform, what to do
|
428 |
+
msg = "cannot aggregate"
|
429 |
+
warning = FutureWarning if op_name == "agg" else None
|
430 |
+
with tm.assert_produces_warning(warning, match=msg):
|
431 |
+
# GH52123
|
432 |
+
result = getattr(datetime_series, op_name)(
|
433 |
+
lambda x: Series([x, x**2], index=["x", "x^2"])
|
434 |
+
)
|
435 |
+
expected = DataFrame({"x": datetime_series, "x^2": datetime_series**2})
|
436 |
+
tm.assert_frame_equal(result, expected)
|
437 |
+
|
438 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
439 |
+
result = datetime_series.agg(lambda x: Series([x, x**2], index=["x", "x^2"]))
|
440 |
+
tm.assert_frame_equal(result, expected)
|
441 |
+
|
442 |
+
|
443 |
+
def test_replicate_describe(string_series):
|
444 |
+
# this also tests a result set that is all scalars
|
445 |
+
expected = string_series.describe()
|
446 |
+
result = string_series.apply(
|
447 |
+
{
|
448 |
+
"count": "count",
|
449 |
+
"mean": "mean",
|
450 |
+
"std": "std",
|
451 |
+
"min": "min",
|
452 |
+
"25%": lambda x: x.quantile(0.25),
|
453 |
+
"50%": "median",
|
454 |
+
"75%": lambda x: x.quantile(0.75),
|
455 |
+
"max": "max",
|
456 |
+
},
|
457 |
+
)
|
458 |
+
tm.assert_series_equal(result, expected)
|
459 |
+
|
460 |
+
|
461 |
+
def test_reduce(string_series):
|
462 |
+
# reductions with named functions
|
463 |
+
result = string_series.agg(["sum", "mean"])
|
464 |
+
expected = Series(
|
465 |
+
[string_series.sum(), string_series.mean()],
|
466 |
+
["sum", "mean"],
|
467 |
+
name=string_series.name,
|
468 |
+
)
|
469 |
+
tm.assert_series_equal(result, expected)
|
470 |
+
|
471 |
+
|
472 |
+
@pytest.mark.parametrize(
|
473 |
+
"how, kwds",
|
474 |
+
[("agg", {}), ("apply", {"by_row": "compat"}), ("apply", {"by_row": False})],
|
475 |
+
)
|
476 |
+
def test_non_callable_aggregates(how, kwds):
|
477 |
+
# test agg using non-callable series attributes
|
478 |
+
# GH 39116 - expand to apply
|
479 |
+
s = Series([1, 2, None])
|
480 |
+
|
481 |
+
# Calling agg w/ just a string arg same as calling s.arg
|
482 |
+
result = getattr(s, how)("size", **kwds)
|
483 |
+
expected = s.size
|
484 |
+
assert result == expected
|
485 |
+
|
486 |
+
# test when mixed w/ callable reducers
|
487 |
+
result = getattr(s, how)(["size", "count", "mean"], **kwds)
|
488 |
+
expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
|
489 |
+
tm.assert_series_equal(result, expected)
|
490 |
+
|
491 |
+
result = getattr(s, how)({"size": "size", "count": "count", "mean": "mean"}, **kwds)
|
492 |
+
tm.assert_series_equal(result, expected)
|
493 |
+
|
494 |
+
|
495 |
+
def test_series_apply_no_suffix_index(by_row):
|
496 |
+
# GH36189
|
497 |
+
s = Series([4] * 3)
|
498 |
+
result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], by_row=by_row)
|
499 |
+
expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])
|
500 |
+
|
501 |
+
tm.assert_series_equal(result, expected)
|
502 |
+
|
503 |
+
|
504 |
+
@pytest.mark.parametrize(
|
505 |
+
"dti,exp",
|
506 |
+
[
|
507 |
+
(
|
508 |
+
Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
|
509 |
+
DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
|
510 |
+
),
|
511 |
+
(
|
512 |
+
Series(
|
513 |
+
np.arange(10, dtype=np.float64),
|
514 |
+
index=date_range("2020-01-01", periods=10),
|
515 |
+
name="ts",
|
516 |
+
),
|
517 |
+
DataFrame(np.repeat([[1, 2]], 10, axis=0), dtype="int64"),
|
518 |
+
),
|
519 |
+
],
|
520 |
+
)
|
521 |
+
@pytest.mark.parametrize("aware", [True, False])
|
522 |
+
def test_apply_series_on_date_time_index_aware_series(dti, exp, aware):
|
523 |
+
# GH 25959
|
524 |
+
# Calling apply on a localized time series should not cause an error
|
525 |
+
if aware:
|
526 |
+
index = dti.tz_localize("UTC").index
|
527 |
+
else:
|
528 |
+
index = dti.index
|
529 |
+
result = Series(index).apply(lambda x: Series([1, 2]))
|
530 |
+
tm.assert_frame_equal(result, exp)
|
531 |
+
|
532 |
+
|
533 |
+
@pytest.mark.parametrize(
|
534 |
+
"by_row, expected", [("compat", Series(np.ones(10), dtype="int64")), (False, 1)]
|
535 |
+
)
|
536 |
+
def test_apply_scalar_on_date_time_index_aware_series(by_row, expected):
|
537 |
+
# GH 25959
|
538 |
+
# Calling apply on a localized time series should not cause an error
|
539 |
+
series = Series(
|
540 |
+
np.arange(10, dtype=np.float64),
|
541 |
+
index=date_range("2020-01-01", periods=10, tz="UTC"),
|
542 |
+
)
|
543 |
+
result = Series(series.index).apply(lambda x: 1, by_row=by_row)
|
544 |
+
tm.assert_equal(result, expected)
|
545 |
+
|
546 |
+
|
547 |
+
def test_apply_to_timedelta(by_row):
|
548 |
+
list_of_valid_strings = ["00:00:01", "00:00:02"]
|
549 |
+
a = pd.to_timedelta(list_of_valid_strings)
|
550 |
+
b = Series(list_of_valid_strings).apply(pd.to_timedelta, by_row=by_row)
|
551 |
+
tm.assert_series_equal(Series(a), b)
|
552 |
+
|
553 |
+
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
|
554 |
+
|
555 |
+
a = pd.to_timedelta(list_of_strings)
|
556 |
+
ser = Series(list_of_strings)
|
557 |
+
b = ser.apply(pd.to_timedelta, by_row=by_row)
|
558 |
+
tm.assert_series_equal(Series(a), b)
|
559 |
+
|
560 |
+
|
561 |
+
@pytest.mark.parametrize(
|
562 |
+
"ops, names",
|
563 |
+
[
|
564 |
+
([np.sum], ["sum"]),
|
565 |
+
([np.sum, np.mean], ["sum", "mean"]),
|
566 |
+
(np.array([np.sum]), ["sum"]),
|
567 |
+
(np.array([np.sum, np.mean]), ["sum", "mean"]),
|
568 |
+
],
|
569 |
+
)
|
570 |
+
@pytest.mark.parametrize(
|
571 |
+
"how, kwargs",
|
572 |
+
[["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]],
|
573 |
+
)
|
574 |
+
def test_apply_listlike_reducer(string_series, ops, names, how, kwargs):
|
575 |
+
# GH 39140
|
576 |
+
expected = Series({name: op(string_series) for name, op in zip(names, ops)})
|
577 |
+
expected.name = "series"
|
578 |
+
warn = FutureWarning if how == "agg" else None
|
579 |
+
msg = f"using Series.[{'|'.join(names)}]"
|
580 |
+
with tm.assert_produces_warning(warn, match=msg):
|
581 |
+
result = getattr(string_series, how)(ops, **kwargs)
|
582 |
+
tm.assert_series_equal(result, expected)
|
583 |
+
|
584 |
+
|
585 |
+
@pytest.mark.parametrize(
|
586 |
+
"ops",
|
587 |
+
[
|
588 |
+
{"A": np.sum},
|
589 |
+
{"A": np.sum, "B": np.mean},
|
590 |
+
Series({"A": np.sum}),
|
591 |
+
Series({"A": np.sum, "B": np.mean}),
|
592 |
+
],
|
593 |
+
)
|
594 |
+
@pytest.mark.parametrize(
|
595 |
+
"how, kwargs",
|
596 |
+
[["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]],
|
597 |
+
)
|
598 |
+
def test_apply_dictlike_reducer(string_series, ops, how, kwargs, by_row):
|
599 |
+
# GH 39140
|
600 |
+
expected = Series({name: op(string_series) for name, op in ops.items()})
|
601 |
+
expected.name = string_series.name
|
602 |
+
warn = FutureWarning if how == "agg" else None
|
603 |
+
msg = "using Series.[sum|mean]"
|
604 |
+
with tm.assert_produces_warning(warn, match=msg):
|
605 |
+
result = getattr(string_series, how)(ops, **kwargs)
|
606 |
+
tm.assert_series_equal(result, expected)
|
607 |
+
|
608 |
+
|
609 |
+
@pytest.mark.parametrize(
|
610 |
+
"ops, names",
|
611 |
+
[
|
612 |
+
([np.sqrt], ["sqrt"]),
|
613 |
+
([np.abs, np.sqrt], ["absolute", "sqrt"]),
|
614 |
+
(np.array([np.sqrt]), ["sqrt"]),
|
615 |
+
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
|
616 |
+
],
|
617 |
+
)
|
618 |
+
def test_apply_listlike_transformer(string_series, ops, names, by_row):
|
619 |
+
# GH 39140
|
620 |
+
with np.errstate(all="ignore"):
|
621 |
+
expected = concat([op(string_series) for op in ops], axis=1)
|
622 |
+
expected.columns = names
|
623 |
+
result = string_series.apply(ops, by_row=by_row)
|
624 |
+
tm.assert_frame_equal(result, expected)
|
625 |
+
|
626 |
+
|
627 |
+
@pytest.mark.parametrize(
|
628 |
+
"ops, expected",
|
629 |
+
[
|
630 |
+
([lambda x: x], DataFrame({"<lambda>": [1, 2, 3]})),
|
631 |
+
([lambda x: x.sum()], Series([6], index=["<lambda>"])),
|
632 |
+
],
|
633 |
+
)
|
634 |
+
def test_apply_listlike_lambda(ops, expected, by_row):
|
635 |
+
# GH53400
|
636 |
+
ser = Series([1, 2, 3])
|
637 |
+
result = ser.apply(ops, by_row=by_row)
|
638 |
+
tm.assert_equal(result, expected)
|
639 |
+
|
640 |
+
|
641 |
+
@pytest.mark.parametrize(
|
642 |
+
"ops",
|
643 |
+
[
|
644 |
+
{"A": np.sqrt},
|
645 |
+
{"A": np.sqrt, "B": np.exp},
|
646 |
+
Series({"A": np.sqrt}),
|
647 |
+
Series({"A": np.sqrt, "B": np.exp}),
|
648 |
+
],
|
649 |
+
)
|
650 |
+
def test_apply_dictlike_transformer(string_series, ops, by_row):
|
651 |
+
# GH 39140
|
652 |
+
with np.errstate(all="ignore"):
|
653 |
+
expected = concat({name: op(string_series) for name, op in ops.items()})
|
654 |
+
expected.name = string_series.name
|
655 |
+
result = string_series.apply(ops, by_row=by_row)
|
656 |
+
tm.assert_series_equal(result, expected)
|
657 |
+
|
658 |
+
|
659 |
+
@pytest.mark.parametrize(
|
660 |
+
"ops, expected",
|
661 |
+
[
|
662 |
+
(
|
663 |
+
{"a": lambda x: x},
|
664 |
+
Series([1, 2, 3], index=MultiIndex.from_arrays([["a"] * 3, range(3)])),
|
665 |
+
),
|
666 |
+
({"a": lambda x: x.sum()}, Series([6], index=["a"])),
|
667 |
+
],
|
668 |
+
)
|
669 |
+
def test_apply_dictlike_lambda(ops, by_row, expected):
|
670 |
+
# GH53400
|
671 |
+
ser = Series([1, 2, 3])
|
672 |
+
result = ser.apply(ops, by_row=by_row)
|
673 |
+
tm.assert_equal(result, expected)
|
674 |
+
|
675 |
+
|
676 |
+
def test_apply_retains_column_name(by_row):
|
677 |
+
# GH 16380
|
678 |
+
df = DataFrame({"x": range(3)}, Index(range(3), name="x"))
|
679 |
+
result = df.x.apply(lambda x: Series(range(x + 1), Index(range(x + 1), name="y")))
|
680 |
+
expected = DataFrame(
|
681 |
+
[[0.0, np.nan, np.nan], [0.0, 1.0, np.nan], [0.0, 1.0, 2.0]],
|
682 |
+
columns=Index(range(3), name="y"),
|
683 |
+
index=Index(range(3), name="x"),
|
684 |
+
)
|
685 |
+
tm.assert_frame_equal(result, expected)
|
686 |
+
|
687 |
+
|
688 |
+
def test_apply_type():
    # GH 46719 - applying ``type`` returns the class of each element
    ser = Series([3, "string", float], index=["a", "b", "c"])
    tm.assert_series_equal(
        ser.apply(type),
        Series([int, str, type], index=["a", "b", "c"]),
    )
|
694 |
+
|
695 |
+
|
696 |
+
def test_series_apply_unpack_nested_data():
    # GH#55189 - ragged nested lists unpack into a NaN-padded frame
    ser = Series([[1, 2, 3], [4, 5, 6, 7]])
    result = ser.apply(lambda x: Series(x))
    expected = DataFrame(
        {0: [1.0, 4.0], 1: [2.0, 5.0], 2: [3.0, 6.0], 3: [np.nan, 7]}
    )
    tm.assert_frame_equal(result, expected)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply_relabeling.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import pandas._testing as tm
|
3 |
+
|
4 |
+
|
5 |
+
def test_relabel_no_duplicated_method():
    """Keyword-relabelled agg matches the equivalent dict form."""
    df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]})

    tm.assert_series_equal(df["A"].agg(foo="sum"), df["A"].agg({"foo": "sum"}))
    tm.assert_series_equal(
        df["B"].agg(foo="min", bar="max"),
        df["B"].agg({"foo": "min", "bar": "max"}),
    )

    # Builtins route through the deprecated numpy-alias path and warn.
    warn_msg = "using Series.[sum|min|max]"
    with tm.assert_produces_warning(FutureWarning, match=warn_msg):
        result = df["B"].agg(foo=sum, bar=min, cat="max")
    with tm.assert_produces_warning(FutureWarning, match=warn_msg):
        expected = df["B"].agg({"foo": sum, "bar": min, "cat": "max"})
    tm.assert_series_equal(result, expected)
|
24 |
+
|
25 |
+
|
26 |
+
def test_relabel_duplicated_method():
    """The same reducer may be reused under different relabelled names."""
    df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]})

    tm.assert_series_equal(
        df["A"].agg(foo="sum", bar="sum"),
        pd.Series([6, 6], index=["foo", "bar"], name="A"),
    )

    # The builtin ``min`` routes through the deprecated alias path and warns.
    with tm.assert_produces_warning(FutureWarning, match="using Series.min"):
        result = df["B"].agg(foo=min, bar="min")
    tm.assert_series_equal(
        result, pd.Series([1, 1], index=["foo", "bar"], name="B")
    )
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_transform.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas import (
|
5 |
+
DataFrame,
|
6 |
+
MultiIndex,
|
7 |
+
Series,
|
8 |
+
concat,
|
9 |
+
)
|
10 |
+
import pandas._testing as tm
|
11 |
+
|
12 |
+
|
13 |
+
@pytest.mark.parametrize(
|
14 |
+
"args, kwargs, increment",
|
15 |
+
[((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)],
|
16 |
+
)
|
17 |
+
def test_agg_args(args, kwargs, increment):
|
18 |
+
# GH 43357
|
19 |
+
def f(x, a=0, b=0, c=0):
|
20 |
+
return x + a + 10 * b + 100 * c
|
21 |
+
|
22 |
+
s = Series([1, 2])
|
23 |
+
result = s.transform(f, 0, *args, **kwargs)
|
24 |
+
expected = s + increment
|
25 |
+
tm.assert_series_equal(result, expected)
|
26 |
+
|
27 |
+
|
28 |
+
@pytest.mark.parametrize(
|
29 |
+
"ops, names",
|
30 |
+
[
|
31 |
+
([np.sqrt], ["sqrt"]),
|
32 |
+
([np.abs, np.sqrt], ["absolute", "sqrt"]),
|
33 |
+
(np.array([np.sqrt]), ["sqrt"]),
|
34 |
+
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
|
35 |
+
],
|
36 |
+
)
|
37 |
+
def test_transform_listlike(string_series, ops, names):
|
38 |
+
# GH 35964
|
39 |
+
with np.errstate(all="ignore"):
|
40 |
+
expected = concat([op(string_series) for op in ops], axis=1)
|
41 |
+
expected.columns = names
|
42 |
+
result = string_series.transform(ops)
|
43 |
+
tm.assert_frame_equal(result, expected)
|
44 |
+
|
45 |
+
|
46 |
+
def test_transform_listlike_func_with_args():
|
47 |
+
# GH 50624
|
48 |
+
|
49 |
+
s = Series([1, 2, 3])
|
50 |
+
|
51 |
+
def foo1(x, a=1, c=0):
|
52 |
+
return x + a + c
|
53 |
+
|
54 |
+
def foo2(x, b=2, c=0):
|
55 |
+
return x + b + c
|
56 |
+
|
57 |
+
msg = r"foo1\(\) got an unexpected keyword argument 'b'"
|
58 |
+
with pytest.raises(TypeError, match=msg):
|
59 |
+
s.transform([foo1, foo2], 0, 3, b=3, c=4)
|
60 |
+
|
61 |
+
result = s.transform([foo1, foo2], 0, 3, c=4)
|
62 |
+
expected = DataFrame({"foo1": [8, 9, 10], "foo2": [8, 9, 10]})
|
63 |
+
tm.assert_frame_equal(result, expected)
|
64 |
+
|
65 |
+
|
66 |
+
@pytest.mark.parametrize("box", [dict, Series])
|
67 |
+
def test_transform_dictlike(string_series, box):
|
68 |
+
# GH 35964
|
69 |
+
with np.errstate(all="ignore"):
|
70 |
+
expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1)
|
71 |
+
expected.columns = ["foo", "bar"]
|
72 |
+
result = string_series.transform(box({"foo": np.sqrt, "bar": np.abs}))
|
73 |
+
tm.assert_frame_equal(result, expected)
|
74 |
+
|
75 |
+
|
76 |
+
def test_transform_dictlike_mixed():
|
77 |
+
# GH 40018 - mix of lists and non-lists in values of a dictionary
|
78 |
+
df = Series([1, 4])
|
79 |
+
result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
|
80 |
+
expected = DataFrame(
|
81 |
+
[[1.0, 1, 1.0], [2.0, 4, 2.0]],
|
82 |
+
columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),
|
83 |
+
)
|
84 |
+
tm.assert_frame_equal(result, expected)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_str.py
ADDED
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from itertools import chain
|
2 |
+
import operator
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from pandas.core.dtypes.common import is_number
|
8 |
+
|
9 |
+
from pandas import (
|
10 |
+
DataFrame,
|
11 |
+
Series,
|
12 |
+
)
|
13 |
+
import pandas._testing as tm
|
14 |
+
from pandas.tests.apply.common import (
|
15 |
+
frame_transform_kernels,
|
16 |
+
series_transform_kernels,
|
17 |
+
)
|
18 |
+
|
19 |
+
|
20 |
+
@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"])
|
21 |
+
@pytest.mark.parametrize(
|
22 |
+
"args,kwds",
|
23 |
+
[
|
24 |
+
pytest.param([], {}, id="no_args_or_kwds"),
|
25 |
+
pytest.param([1], {}, id="axis_from_args"),
|
26 |
+
pytest.param([], {"axis": 1}, id="axis_from_kwds"),
|
27 |
+
pytest.param([], {"numeric_only": True}, id="optional_kwds"),
|
28 |
+
pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"),
|
29 |
+
],
|
30 |
+
)
|
31 |
+
@pytest.mark.parametrize("how", ["agg", "apply"])
|
32 |
+
def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how):
|
33 |
+
if len(args) > 1 and how == "agg":
|
34 |
+
request.applymarker(
|
35 |
+
pytest.mark.xfail(
|
36 |
+
raises=TypeError,
|
37 |
+
reason="agg/apply signature mismatch - agg passes 2nd "
|
38 |
+
"argument to func",
|
39 |
+
)
|
40 |
+
)
|
41 |
+
result = getattr(float_frame, how)(func, *args, **kwds)
|
42 |
+
expected = getattr(float_frame, func)(*args, **kwds)
|
43 |
+
tm.assert_series_equal(result, expected)
|
44 |
+
|
45 |
+
|
46 |
+
@pytest.mark.parametrize("arg", ["sum", "mean", "min", "max", "std"])
|
47 |
+
def test_with_string_args(datetime_series, arg):
|
48 |
+
result = datetime_series.apply(arg)
|
49 |
+
expected = getattr(datetime_series, arg)()
|
50 |
+
assert result == expected
|
51 |
+
|
52 |
+
|
53 |
+
@pytest.mark.parametrize("op", ["mean", "median", "std", "var"])
|
54 |
+
@pytest.mark.parametrize("how", ["agg", "apply"])
|
55 |
+
def test_apply_np_reducer(op, how):
|
56 |
+
# GH 39116
|
57 |
+
float_frame = DataFrame({"a": [1, 2], "b": [3, 4]})
|
58 |
+
result = getattr(float_frame, how)(op)
|
59 |
+
# pandas ddof defaults to 1, numpy to 0
|
60 |
+
kwargs = {"ddof": 1} if op in ("std", "var") else {}
|
61 |
+
expected = Series(
|
62 |
+
getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns
|
63 |
+
)
|
64 |
+
tm.assert_series_equal(result, expected)
|
65 |
+
|
66 |
+
|
67 |
+
@pytest.mark.parametrize(
|
68 |
+
"op", ["abs", "ceil", "cos", "cumsum", "exp", "log", "sqrt", "square"]
|
69 |
+
)
|
70 |
+
@pytest.mark.parametrize("how", ["transform", "apply"])
|
71 |
+
def test_apply_np_transformer(float_frame, op, how):
|
72 |
+
# GH 39116
|
73 |
+
|
74 |
+
# float_frame will _usually_ have negative values, which will
|
75 |
+
# trigger the warning here, but let's put one in just to be sure
|
76 |
+
float_frame.iloc[0, 0] = -1.0
|
77 |
+
warn = None
|
78 |
+
if op in ["log", "sqrt"]:
|
79 |
+
warn = RuntimeWarning
|
80 |
+
|
81 |
+
with tm.assert_produces_warning(warn, check_stacklevel=False):
|
82 |
+
# float_frame fixture is defined in conftest.py, so we don't check the
|
83 |
+
# stacklevel as otherwise the test would fail.
|
84 |
+
result = getattr(float_frame, how)(op)
|
85 |
+
expected = getattr(np, op)(float_frame)
|
86 |
+
tm.assert_frame_equal(result, expected)
|
87 |
+
|
88 |
+
|
89 |
+
@pytest.mark.parametrize(
|
90 |
+
"series, func, expected",
|
91 |
+
chain(
|
92 |
+
tm.get_cython_table_params(
|
93 |
+
Series(dtype=np.float64),
|
94 |
+
[
|
95 |
+
("sum", 0),
|
96 |
+
("max", np.nan),
|
97 |
+
("min", np.nan),
|
98 |
+
("all", True),
|
99 |
+
("any", False),
|
100 |
+
("mean", np.nan),
|
101 |
+
("prod", 1),
|
102 |
+
("std", np.nan),
|
103 |
+
("var", np.nan),
|
104 |
+
("median", np.nan),
|
105 |
+
],
|
106 |
+
),
|
107 |
+
tm.get_cython_table_params(
|
108 |
+
Series([np.nan, 1, 2, 3]),
|
109 |
+
[
|
110 |
+
("sum", 6),
|
111 |
+
("max", 3),
|
112 |
+
("min", 1),
|
113 |
+
("all", True),
|
114 |
+
("any", True),
|
115 |
+
("mean", 2),
|
116 |
+
("prod", 6),
|
117 |
+
("std", 1),
|
118 |
+
("var", 1),
|
119 |
+
("median", 2),
|
120 |
+
],
|
121 |
+
),
|
122 |
+
tm.get_cython_table_params(
|
123 |
+
Series("a b c".split()),
|
124 |
+
[
|
125 |
+
("sum", "abc"),
|
126 |
+
("max", "c"),
|
127 |
+
("min", "a"),
|
128 |
+
("all", True),
|
129 |
+
("any", True),
|
130 |
+
],
|
131 |
+
),
|
132 |
+
),
|
133 |
+
)
|
134 |
+
def test_agg_cython_table_series(series, func, expected):
|
135 |
+
# GH21224
|
136 |
+
# test reducing functions in
|
137 |
+
# pandas.core.base.SelectionMixin._cython_table
|
138 |
+
warn = None if isinstance(func, str) else FutureWarning
|
139 |
+
with tm.assert_produces_warning(warn, match="is currently using Series.*"):
|
140 |
+
result = series.agg(func)
|
141 |
+
if is_number(expected):
|
142 |
+
assert np.isclose(result, expected, equal_nan=True)
|
143 |
+
else:
|
144 |
+
assert result == expected
|
145 |
+
|
146 |
+
|
147 |
+
@pytest.mark.parametrize(
|
148 |
+
"series, func, expected",
|
149 |
+
chain(
|
150 |
+
tm.get_cython_table_params(
|
151 |
+
Series(dtype=np.float64),
|
152 |
+
[
|
153 |
+
("cumprod", Series([], dtype=np.float64)),
|
154 |
+
("cumsum", Series([], dtype=np.float64)),
|
155 |
+
],
|
156 |
+
),
|
157 |
+
tm.get_cython_table_params(
|
158 |
+
Series([np.nan, 1, 2, 3]),
|
159 |
+
[
|
160 |
+
("cumprod", Series([np.nan, 1, 2, 6])),
|
161 |
+
("cumsum", Series([np.nan, 1, 3, 6])),
|
162 |
+
],
|
163 |
+
),
|
164 |
+
tm.get_cython_table_params(
|
165 |
+
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
|
166 |
+
),
|
167 |
+
),
|
168 |
+
)
|
169 |
+
def test_agg_cython_table_transform_series(series, func, expected):
|
170 |
+
# GH21224
|
171 |
+
# test transforming functions in
|
172 |
+
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
|
173 |
+
warn = None if isinstance(func, str) else FutureWarning
|
174 |
+
with tm.assert_produces_warning(warn, match="is currently using Series.*"):
|
175 |
+
result = series.agg(func)
|
176 |
+
tm.assert_series_equal(result, expected)
|
177 |
+
|
178 |
+
|
179 |
+
@pytest.mark.parametrize(
|
180 |
+
"df, func, expected",
|
181 |
+
chain(
|
182 |
+
tm.get_cython_table_params(
|
183 |
+
DataFrame(),
|
184 |
+
[
|
185 |
+
("sum", Series(dtype="float64")),
|
186 |
+
("max", Series(dtype="float64")),
|
187 |
+
("min", Series(dtype="float64")),
|
188 |
+
("all", Series(dtype=bool)),
|
189 |
+
("any", Series(dtype=bool)),
|
190 |
+
("mean", Series(dtype="float64")),
|
191 |
+
("prod", Series(dtype="float64")),
|
192 |
+
("std", Series(dtype="float64")),
|
193 |
+
("var", Series(dtype="float64")),
|
194 |
+
("median", Series(dtype="float64")),
|
195 |
+
],
|
196 |
+
),
|
197 |
+
tm.get_cython_table_params(
|
198 |
+
DataFrame([[np.nan, 1], [1, 2]]),
|
199 |
+
[
|
200 |
+
("sum", Series([1.0, 3])),
|
201 |
+
("max", Series([1.0, 2])),
|
202 |
+
("min", Series([1.0, 1])),
|
203 |
+
("all", Series([True, True])),
|
204 |
+
("any", Series([True, True])),
|
205 |
+
("mean", Series([1, 1.5])),
|
206 |
+
("prod", Series([1.0, 2])),
|
207 |
+
("std", Series([np.nan, 0.707107])),
|
208 |
+
("var", Series([np.nan, 0.5])),
|
209 |
+
("median", Series([1, 1.5])),
|
210 |
+
],
|
211 |
+
),
|
212 |
+
),
|
213 |
+
)
|
214 |
+
def test_agg_cython_table_frame(df, func, expected, axis):
|
215 |
+
# GH 21224
|
216 |
+
# test reducing functions in
|
217 |
+
# pandas.core.base.SelectionMixin._cython_table
|
218 |
+
warn = None if isinstance(func, str) else FutureWarning
|
219 |
+
with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"):
|
220 |
+
# GH#53425
|
221 |
+
result = df.agg(func, axis=axis)
|
222 |
+
tm.assert_series_equal(result, expected)
|
223 |
+
|
224 |
+
|
225 |
+
@pytest.mark.parametrize(
|
226 |
+
"df, func, expected",
|
227 |
+
chain(
|
228 |
+
tm.get_cython_table_params(
|
229 |
+
DataFrame(), [("cumprod", DataFrame()), ("cumsum", DataFrame())]
|
230 |
+
),
|
231 |
+
tm.get_cython_table_params(
|
232 |
+
DataFrame([[np.nan, 1], [1, 2]]),
|
233 |
+
[
|
234 |
+
("cumprod", DataFrame([[np.nan, 1], [1, 2]])),
|
235 |
+
("cumsum", DataFrame([[np.nan, 1], [1, 3]])),
|
236 |
+
],
|
237 |
+
),
|
238 |
+
),
|
239 |
+
)
|
240 |
+
def test_agg_cython_table_transform_frame(df, func, expected, axis):
|
241 |
+
# GH 21224
|
242 |
+
# test transforming functions in
|
243 |
+
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
|
244 |
+
if axis in ("columns", 1):
|
245 |
+
# operating blockwise doesn't let us preserve dtypes
|
246 |
+
expected = expected.astype("float64")
|
247 |
+
|
248 |
+
warn = None if isinstance(func, str) else FutureWarning
|
249 |
+
with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"):
|
250 |
+
# GH#53425
|
251 |
+
result = df.agg(func, axis=axis)
|
252 |
+
tm.assert_frame_equal(result, expected)
|
253 |
+
|
254 |
+
|
255 |
+
@pytest.mark.parametrize("op", series_transform_kernels)
|
256 |
+
def test_transform_groupby_kernel_series(request, string_series, op):
|
257 |
+
# GH 35964
|
258 |
+
if op == "ngroup":
|
259 |
+
request.applymarker(
|
260 |
+
pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
|
261 |
+
)
|
262 |
+
args = [0.0] if op == "fillna" else []
|
263 |
+
ones = np.ones(string_series.shape[0])
|
264 |
+
|
265 |
+
warn = FutureWarning if op == "fillna" else None
|
266 |
+
msg = "SeriesGroupBy.fillna is deprecated"
|
267 |
+
with tm.assert_produces_warning(warn, match=msg):
|
268 |
+
expected = string_series.groupby(ones).transform(op, *args)
|
269 |
+
result = string_series.transform(op, 0, *args)
|
270 |
+
tm.assert_series_equal(result, expected)
|
271 |
+
|
272 |
+
|
273 |
+
@pytest.mark.parametrize("op", frame_transform_kernels)
|
274 |
+
def test_transform_groupby_kernel_frame(request, axis, float_frame, op):
|
275 |
+
if op == "ngroup":
|
276 |
+
request.applymarker(
|
277 |
+
pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
|
278 |
+
)
|
279 |
+
|
280 |
+
# GH 35964
|
281 |
+
|
282 |
+
args = [0.0] if op == "fillna" else []
|
283 |
+
if axis in (0, "index"):
|
284 |
+
ones = np.ones(float_frame.shape[0])
|
285 |
+
msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
|
286 |
+
else:
|
287 |
+
ones = np.ones(float_frame.shape[1])
|
288 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
289 |
+
|
290 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
291 |
+
gb = float_frame.groupby(ones, axis=axis)
|
292 |
+
|
293 |
+
warn = FutureWarning if op == "fillna" else None
|
294 |
+
op_msg = "DataFrameGroupBy.fillna is deprecated"
|
295 |
+
with tm.assert_produces_warning(warn, match=op_msg):
|
296 |
+
expected = gb.transform(op, *args)
|
297 |
+
|
298 |
+
result = float_frame.transform(op, axis, *args)
|
299 |
+
tm.assert_frame_equal(result, expected)
|
300 |
+
|
301 |
+
# same thing, but ensuring we have multiple blocks
|
302 |
+
assert "E" not in float_frame.columns
|
303 |
+
float_frame["E"] = float_frame["A"].copy()
|
304 |
+
assert len(float_frame._mgr.arrays) > 1
|
305 |
+
|
306 |
+
if axis in (0, "index"):
|
307 |
+
ones = np.ones(float_frame.shape[0])
|
308 |
+
else:
|
309 |
+
ones = np.ones(float_frame.shape[1])
|
310 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
311 |
+
gb2 = float_frame.groupby(ones, axis=axis)
|
312 |
+
warn = FutureWarning if op == "fillna" else None
|
313 |
+
op_msg = "DataFrameGroupBy.fillna is deprecated"
|
314 |
+
with tm.assert_produces_warning(warn, match=op_msg):
|
315 |
+
expected2 = gb2.transform(op, *args)
|
316 |
+
result2 = float_frame.transform(op, axis, *args)
|
317 |
+
tm.assert_frame_equal(result2, expected2)
|
318 |
+
|
319 |
+
|
320 |
+
@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"])
|
321 |
+
def test_transform_method_name(method):
|
322 |
+
# GH 19760
|
323 |
+
df = DataFrame({"A": [-1, 2]})
|
324 |
+
result = df.transform(method)
|
325 |
+
expected = operator.methodcaller(method)(df)
|
326 |
+
tm.assert_frame_equal(result, expected)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (193 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc
ADDED
Binary file (11.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc
ADDED
Binary file (18.1 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc
ADDED
Binary file (7.46 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc
ADDED
Binary file (15.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc
ADDED
Binary file (7.43 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc
ADDED
Binary file (5.96 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc
ADDED
Binary file (4.26 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py
ADDED
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from copy import deepcopy
|
2 |
+
from operator import methodcaller
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
import pandas as pd
|
8 |
+
from pandas import (
|
9 |
+
DataFrame,
|
10 |
+
MultiIndex,
|
11 |
+
Series,
|
12 |
+
date_range,
|
13 |
+
)
|
14 |
+
import pandas._testing as tm
|
15 |
+
|
16 |
+
|
17 |
+
class TestDataFrame:
|
18 |
+
@pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
|
19 |
+
def test_set_axis_name(self, func):
|
20 |
+
df = DataFrame([[1, 2], [3, 4]])
|
21 |
+
|
22 |
+
result = methodcaller(func, "foo")(df)
|
23 |
+
assert df.index.name is None
|
24 |
+
assert result.index.name == "foo"
|
25 |
+
|
26 |
+
result = methodcaller(func, "cols", axis=1)(df)
|
27 |
+
assert df.columns.name is None
|
28 |
+
assert result.columns.name == "cols"
|
29 |
+
|
30 |
+
@pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
|
31 |
+
def test_set_axis_name_mi(self, func):
|
32 |
+
df = DataFrame(
|
33 |
+
np.empty((3, 3)),
|
34 |
+
index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]),
|
35 |
+
columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]),
|
36 |
+
)
|
37 |
+
|
38 |
+
level_names = ["L1", "L2"]
|
39 |
+
|
40 |
+
result = methodcaller(func, level_names)(df)
|
41 |
+
assert result.index.names == level_names
|
42 |
+
assert result.columns.names == [None, None]
|
43 |
+
|
44 |
+
result = methodcaller(func, level_names, axis=1)(df)
|
45 |
+
assert result.columns.names == ["L1", "L2"]
|
46 |
+
assert result.index.names == [None, None]
|
47 |
+
|
48 |
+
def test_nonzero_single_element(self):
|
49 |
+
# allow single item via bool method
|
50 |
+
msg_warn = (
|
51 |
+
"DataFrame.bool is now deprecated and will be removed "
|
52 |
+
"in future version of pandas"
|
53 |
+
)
|
54 |
+
df = DataFrame([[True]])
|
55 |
+
df1 = DataFrame([[False]])
|
56 |
+
with tm.assert_produces_warning(FutureWarning, match=msg_warn):
|
57 |
+
assert df.bool()
|
58 |
+
|
59 |
+
with tm.assert_produces_warning(FutureWarning, match=msg_warn):
|
60 |
+
assert not df1.bool()
|
61 |
+
|
62 |
+
df = DataFrame([[False, False]])
|
63 |
+
msg_err = "The truth value of a DataFrame is ambiguous"
|
64 |
+
with pytest.raises(ValueError, match=msg_err):
|
65 |
+
bool(df)
|
66 |
+
|
67 |
+
with tm.assert_produces_warning(FutureWarning, match=msg_warn):
|
68 |
+
with pytest.raises(ValueError, match=msg_err):
|
69 |
+
df.bool()
|
70 |
+
|
71 |
+
def test_metadata_propagation_indiv_groupby(self):
|
72 |
+
# groupby
|
73 |
+
df = DataFrame(
|
74 |
+
{
|
75 |
+
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
|
76 |
+
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
|
77 |
+
"C": np.random.default_rng(2).standard_normal(8),
|
78 |
+
"D": np.random.default_rng(2).standard_normal(8),
|
79 |
+
}
|
80 |
+
)
|
81 |
+
result = df.groupby("A").sum()
|
82 |
+
tm.assert_metadata_equivalent(df, result)
|
83 |
+
|
84 |
+
def test_metadata_propagation_indiv_resample(self):
|
85 |
+
# resample
|
86 |
+
df = DataFrame(
|
87 |
+
np.random.default_rng(2).standard_normal((1000, 2)),
|
88 |
+
index=date_range("20130101", periods=1000, freq="s"),
|
89 |
+
)
|
90 |
+
result = df.resample("1min")
|
91 |
+
tm.assert_metadata_equivalent(df, result)
|
92 |
+
|
93 |
+
def test_metadata_propagation_indiv(self, monkeypatch):
|
94 |
+
# merging with override
|
95 |
+
# GH 6923
|
96 |
+
|
97 |
+
def finalize(self, other, method=None, **kwargs):
|
98 |
+
for name in self._metadata:
|
99 |
+
if method == "merge":
|
100 |
+
left, right = other.left, other.right
|
101 |
+
value = getattr(left, name, "") + "|" + getattr(right, name, "")
|
102 |
+
object.__setattr__(self, name, value)
|
103 |
+
elif method == "concat":
|
104 |
+
value = "+".join(
|
105 |
+
[getattr(o, name) for o in other.objs if getattr(o, name, None)]
|
106 |
+
)
|
107 |
+
object.__setattr__(self, name, value)
|
108 |
+
else:
|
109 |
+
object.__setattr__(self, name, getattr(other, name, ""))
|
110 |
+
|
111 |
+
return self
|
112 |
+
|
113 |
+
with monkeypatch.context() as m:
|
114 |
+
m.setattr(DataFrame, "_metadata", ["filename"])
|
115 |
+
m.setattr(DataFrame, "__finalize__", finalize)
|
116 |
+
|
117 |
+
df1 = DataFrame(
|
118 |
+
np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"]
|
119 |
+
)
|
120 |
+
df2 = DataFrame(
|
121 |
+
np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"]
|
122 |
+
)
|
123 |
+
DataFrame._metadata = ["filename"]
|
124 |
+
df1.filename = "fname1.csv"
|
125 |
+
df2.filename = "fname2.csv"
|
126 |
+
|
127 |
+
result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
|
128 |
+
assert result.filename == "fname1.csv|fname2.csv"
|
129 |
+
|
130 |
+
# concat
|
131 |
+
# GH#6927
|
132 |
+
df1 = DataFrame(
|
133 |
+
np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab")
|
134 |
+
)
|
135 |
+
df1.filename = "foo"
|
136 |
+
|
137 |
+
result = pd.concat([df1, df1])
|
138 |
+
assert result.filename == "foo+foo"
|
139 |
+
|
140 |
+
def test_set_attribute(self):
|
141 |
+
# Test for consistent setattr behavior when an attribute and a column
|
142 |
+
# have the same name (Issue #8994)
|
143 |
+
df = DataFrame({"x": [1, 2, 3]})
|
144 |
+
|
145 |
+
df.y = 2
|
146 |
+
df["y"] = [2, 4, 6]
|
147 |
+
df.y = 5
|
148 |
+
|
149 |
+
assert df.y == 5
|
150 |
+
tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y"))
|
151 |
+
|
152 |
+
def test_deepcopy_empty(self):
|
153 |
+
# This test covers empty frame copying with non-empty column sets
|
154 |
+
# as reported in issue GH15370
|
155 |
+
empty_frame = DataFrame(data=[], index=[], columns=["A"])
|
156 |
+
empty_frame_copy = deepcopy(empty_frame)
|
157 |
+
|
158 |
+
tm.assert_frame_equal(empty_frame_copy, empty_frame)
|
159 |
+
|
160 |
+
|
161 |
+
# formerly in Generic but only test DataFrame
|
162 |
+
class TestDataFrame2:
|
163 |
+
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
|
164 |
+
def test_validate_bool_args(self, value):
|
165 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
166 |
+
|
167 |
+
msg = 'For argument "inplace" expected type bool, received type'
|
168 |
+
with pytest.raises(ValueError, match=msg):
|
169 |
+
df.copy().rename_axis(mapper={"a": "x", "b": "y"}, axis=1, inplace=value)
|
170 |
+
|
171 |
+
with pytest.raises(ValueError, match=msg):
|
172 |
+
df.copy().drop("a", axis=1, inplace=value)
|
173 |
+
|
174 |
+
with pytest.raises(ValueError, match=msg):
|
175 |
+
df.copy().fillna(value=0, inplace=value)
|
176 |
+
|
177 |
+
with pytest.raises(ValueError, match=msg):
|
178 |
+
df.copy().replace(to_replace=1, value=7, inplace=value)
|
179 |
+
|
180 |
+
with pytest.raises(ValueError, match=msg):
|
181 |
+
df.copy().interpolate(inplace=value)
|
182 |
+
|
183 |
+
with pytest.raises(ValueError, match=msg):
|
184 |
+
df.copy()._where(cond=df.a > 2, inplace=value)
|
185 |
+
|
186 |
+
with pytest.raises(ValueError, match=msg):
|
187 |
+
df.copy().mask(cond=df.a > 2, inplace=value)
|
188 |
+
|
189 |
+
def test_unexpected_keyword(self):
|
190 |
+
# GH8597
|
191 |
+
df = DataFrame(
|
192 |
+
np.random.default_rng(2).standard_normal((5, 2)), columns=["jim", "joe"]
|
193 |
+
)
|
194 |
+
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
|
195 |
+
ts = df["joe"].copy()
|
196 |
+
ts[2] = np.nan
|
197 |
+
|
198 |
+
msg = "unexpected keyword"
|
199 |
+
with pytest.raises(TypeError, match=msg):
|
200 |
+
df.drop("joe", axis=1, in_place=True)
|
201 |
+
|
202 |
+
with pytest.raises(TypeError, match=msg):
|
203 |
+
df.reindex([1, 0], inplace=True)
|
204 |
+
|
205 |
+
with pytest.raises(TypeError, match=msg):
|
206 |
+
ca.fillna(0, inplace=True)
|
207 |
+
|
208 |
+
with pytest.raises(TypeError, match=msg):
|
209 |
+
ts.fillna(0, in_place=True)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py
ADDED
@@ -0,0 +1,504 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from copy import (
|
2 |
+
copy,
|
3 |
+
deepcopy,
|
4 |
+
)
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import pytest
|
8 |
+
|
9 |
+
from pandas.core.dtypes.common import is_scalar
|
10 |
+
|
11 |
+
from pandas import (
|
12 |
+
DataFrame,
|
13 |
+
Index,
|
14 |
+
Series,
|
15 |
+
date_range,
|
16 |
+
)
|
17 |
+
import pandas._testing as tm
|
18 |
+
|
19 |
+
# ----------------------------------------------------------------------
|
20 |
+
# Generic types test cases
|
21 |
+
|
22 |
+
|
23 |
+
def construct(box, shape, value=None, dtype=None, **kwargs):
|
24 |
+
"""
|
25 |
+
construct an object for the given shape
|
26 |
+
if value is specified use that if its a scalar
|
27 |
+
if value is an array, repeat it as needed
|
28 |
+
"""
|
29 |
+
if isinstance(shape, int):
|
30 |
+
shape = tuple([shape] * box._AXIS_LEN)
|
31 |
+
if value is not None:
|
32 |
+
if is_scalar(value):
|
33 |
+
if value == "empty":
|
34 |
+
arr = None
|
35 |
+
dtype = np.float64
|
36 |
+
|
37 |
+
# remove the info axis
|
38 |
+
kwargs.pop(box._info_axis_name, None)
|
39 |
+
else:
|
40 |
+
arr = np.empty(shape, dtype=dtype)
|
41 |
+
arr.fill(value)
|
42 |
+
else:
|
43 |
+
fshape = np.prod(shape)
|
44 |
+
arr = value.ravel()
|
45 |
+
new_shape = fshape / arr.shape[0]
|
46 |
+
if fshape % arr.shape[0] != 0:
|
47 |
+
raise Exception("invalid value passed in construct")
|
48 |
+
|
49 |
+
arr = np.repeat(arr, new_shape).reshape(shape)
|
50 |
+
else:
|
51 |
+
arr = np.random.default_rng(2).standard_normal(shape)
|
52 |
+
return box(arr, dtype=dtype, **kwargs)
|
53 |
+
|
54 |
+
|
55 |
+
class TestGeneric:
|
56 |
+
@pytest.mark.parametrize(
|
57 |
+
"func",
|
58 |
+
[
|
59 |
+
str.lower,
|
60 |
+
{x: x.lower() for x in list("ABCD")},
|
61 |
+
Series({x: x.lower() for x in list("ABCD")}),
|
62 |
+
],
|
63 |
+
)
|
64 |
+
def test_rename(self, frame_or_series, func):
|
65 |
+
# single axis
|
66 |
+
idx = list("ABCD")
|
67 |
+
|
68 |
+
for axis in frame_or_series._AXIS_ORDERS:
|
69 |
+
kwargs = {axis: idx}
|
70 |
+
obj = construct(frame_or_series, 4, **kwargs)
|
71 |
+
|
72 |
+
# rename a single axis
|
73 |
+
result = obj.rename(**{axis: func})
|
74 |
+
expected = obj.copy()
|
75 |
+
setattr(expected, axis, list("abcd"))
|
76 |
+
tm.assert_equal(result, expected)
|
77 |
+
|
78 |
+
def test_get_numeric_data(self, frame_or_series):
|
79 |
+
n = 4
|
80 |
+
kwargs = {
|
81 |
+
frame_or_series._get_axis_name(i): list(range(n))
|
82 |
+
for i in range(frame_or_series._AXIS_LEN)
|
83 |
+
}
|
84 |
+
|
85 |
+
# get the numeric data
|
86 |
+
o = construct(frame_or_series, n, **kwargs)
|
87 |
+
result = o._get_numeric_data()
|
88 |
+
tm.assert_equal(result, o)
|
89 |
+
|
90 |
+
# non-inclusion
|
91 |
+
result = o._get_bool_data()
|
92 |
+
expected = construct(frame_or_series, n, value="empty", **kwargs)
|
93 |
+
if isinstance(o, DataFrame):
|
94 |
+
# preserve columns dtype
|
95 |
+
expected.columns = o.columns[:0]
|
96 |
+
# https://github.com/pandas-dev/pandas/issues/50862
|
97 |
+
tm.assert_equal(result.reset_index(drop=True), expected)
|
98 |
+
|
99 |
+
# get the bool data
|
100 |
+
arr = np.array([True, True, False, True])
|
101 |
+
o = construct(frame_or_series, n, value=arr, **kwargs)
|
102 |
+
result = o._get_numeric_data()
|
103 |
+
tm.assert_equal(result, o)
|
104 |
+
|
105 |
+
def test_nonzero(self, frame_or_series):
|
106 |
+
# GH 4633
|
107 |
+
# look at the boolean/nonzero behavior for objects
|
108 |
+
obj = construct(frame_or_series, shape=4)
|
109 |
+
msg = f"The truth value of a {frame_or_series.__name__} is ambiguous"
|
110 |
+
with pytest.raises(ValueError, match=msg):
|
111 |
+
bool(obj == 0)
|
112 |
+
with pytest.raises(ValueError, match=msg):
|
113 |
+
bool(obj == 1)
|
114 |
+
with pytest.raises(ValueError, match=msg):
|
115 |
+
bool(obj)
|
116 |
+
|
117 |
+
obj = construct(frame_or_series, shape=4, value=1)
|
118 |
+
with pytest.raises(ValueError, match=msg):
|
119 |
+
bool(obj == 0)
|
120 |
+
with pytest.raises(ValueError, match=msg):
|
121 |
+
bool(obj == 1)
|
122 |
+
with pytest.raises(ValueError, match=msg):
|
123 |
+
bool(obj)
|
124 |
+
|
125 |
+
obj = construct(frame_or_series, shape=4, value=np.nan)
|
126 |
+
with pytest.raises(ValueError, match=msg):
|
127 |
+
bool(obj == 0)
|
128 |
+
with pytest.raises(ValueError, match=msg):
|
129 |
+
bool(obj == 1)
|
130 |
+
with pytest.raises(ValueError, match=msg):
|
131 |
+
bool(obj)
|
132 |
+
|
133 |
+
# empty
|
134 |
+
obj = construct(frame_or_series, shape=0)
|
135 |
+
with pytest.raises(ValueError, match=msg):
|
136 |
+
bool(obj)
|
137 |
+
|
138 |
+
# invalid behaviors
|
139 |
+
|
140 |
+
obj1 = construct(frame_or_series, shape=4, value=1)
|
141 |
+
obj2 = construct(frame_or_series, shape=4, value=1)
|
142 |
+
|
143 |
+
with pytest.raises(ValueError, match=msg):
|
144 |
+
if obj1:
|
145 |
+
pass
|
146 |
+
|
147 |
+
with pytest.raises(ValueError, match=msg):
|
148 |
+
obj1 and obj2
|
149 |
+
with pytest.raises(ValueError, match=msg):
|
150 |
+
obj1 or obj2
|
151 |
+
with pytest.raises(ValueError, match=msg):
|
152 |
+
not obj1
|
153 |
+
|
154 |
+
def test_frame_or_series_compound_dtypes(self, frame_or_series):
|
155 |
+
# see gh-5191
|
156 |
+
# Compound dtypes should raise NotImplementedError.
|
157 |
+
|
158 |
+
def f(dtype):
|
159 |
+
return construct(frame_or_series, shape=3, value=1, dtype=dtype)
|
160 |
+
|
161 |
+
msg = (
|
162 |
+
"compound dtypes are not implemented "
|
163 |
+
f"in the {frame_or_series.__name__} constructor"
|
164 |
+
)
|
165 |
+
|
166 |
+
with pytest.raises(NotImplementedError, match=msg):
|
167 |
+
f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])
|
168 |
+
|
169 |
+
# these work (though results may be unexpected)
|
170 |
+
f("int64")
|
171 |
+
f("float64")
|
172 |
+
f("M8[ns]")
|
173 |
+
|
174 |
+
def test_metadata_propagation(self, frame_or_series):
|
175 |
+
# check that the metadata matches up on the resulting ops
|
176 |
+
|
177 |
+
o = construct(frame_or_series, shape=3)
|
178 |
+
o.name = "foo"
|
179 |
+
o2 = construct(frame_or_series, shape=3)
|
180 |
+
o2.name = "bar"
|
181 |
+
|
182 |
+
# ----------
|
183 |
+
# preserving
|
184 |
+
# ----------
|
185 |
+
|
186 |
+
# simple ops with scalars
|
187 |
+
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
|
188 |
+
result = getattr(o, op)(1)
|
189 |
+
tm.assert_metadata_equivalent(o, result)
|
190 |
+
|
191 |
+
# ops with like
|
192 |
+
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
|
193 |
+
result = getattr(o, op)(o)
|
194 |
+
tm.assert_metadata_equivalent(o, result)
|
195 |
+
|
196 |
+
# simple boolean
|
197 |
+
for op in ["__eq__", "__le__", "__ge__"]:
|
198 |
+
v1 = getattr(o, op)(o)
|
199 |
+
tm.assert_metadata_equivalent(o, v1)
|
200 |
+
tm.assert_metadata_equivalent(o, v1 & v1)
|
201 |
+
tm.assert_metadata_equivalent(o, v1 | v1)
|
202 |
+
|
203 |
+
# combine_first
|
204 |
+
result = o.combine_first(o2)
|
205 |
+
tm.assert_metadata_equivalent(o, result)
|
206 |
+
|
207 |
+
# ---------------------------
|
208 |
+
# non-preserving (by default)
|
209 |
+
# ---------------------------
|
210 |
+
|
211 |
+
# add non-like
|
212 |
+
result = o + o2
|
213 |
+
tm.assert_metadata_equivalent(result)
|
214 |
+
|
215 |
+
# simple boolean
|
216 |
+
for op in ["__eq__", "__le__", "__ge__"]:
|
217 |
+
# this is a name matching op
|
218 |
+
v1 = getattr(o, op)(o)
|
219 |
+
v2 = getattr(o, op)(o2)
|
220 |
+
tm.assert_metadata_equivalent(v2)
|
221 |
+
tm.assert_metadata_equivalent(v1 & v2)
|
222 |
+
tm.assert_metadata_equivalent(v1 | v2)
|
223 |
+
|
224 |
+
def test_size_compat(self, frame_or_series):
|
225 |
+
# GH8846
|
226 |
+
# size property should be defined
|
227 |
+
|
228 |
+
o = construct(frame_or_series, shape=10)
|
229 |
+
assert o.size == np.prod(o.shape)
|
230 |
+
assert o.size == 10 ** len(o.axes)
|
231 |
+
|
232 |
+
def test_split_compat(self, frame_or_series):
|
233 |
+
# xref GH8846
|
234 |
+
o = construct(frame_or_series, shape=10)
|
235 |
+
with tm.assert_produces_warning(
|
236 |
+
FutureWarning, match=".swapaxes' is deprecated", check_stacklevel=False
|
237 |
+
):
|
238 |
+
assert len(np.array_split(o, 5)) == 5
|
239 |
+
assert len(np.array_split(o, 2)) == 2
|
240 |
+
|
241 |
+
# See gh-12301
|
242 |
+
def test_stat_unexpected_keyword(self, frame_or_series):
|
243 |
+
obj = construct(frame_or_series, 5)
|
244 |
+
starwars = "Star Wars"
|
245 |
+
errmsg = "unexpected keyword"
|
246 |
+
|
247 |
+
with pytest.raises(TypeError, match=errmsg):
|
248 |
+
obj.max(epic=starwars) # stat_function
|
249 |
+
with pytest.raises(TypeError, match=errmsg):
|
250 |
+
obj.var(epic=starwars) # stat_function_ddof
|
251 |
+
with pytest.raises(TypeError, match=errmsg):
|
252 |
+
obj.sum(epic=starwars) # cum_function
|
253 |
+
with pytest.raises(TypeError, match=errmsg):
|
254 |
+
obj.any(epic=starwars) # logical_function
|
255 |
+
|
256 |
+
@pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
|
257 |
+
def test_api_compat(self, func, frame_or_series):
|
258 |
+
# GH 12021
|
259 |
+
# compat for __name__, __qualname__
|
260 |
+
|
261 |
+
obj = construct(frame_or_series, 5)
|
262 |
+
f = getattr(obj, func)
|
263 |
+
assert f.__name__ == func
|
264 |
+
assert f.__qualname__.endswith(func)
|
265 |
+
|
266 |
+
def test_stat_non_defaults_args(self, frame_or_series):
|
267 |
+
obj = construct(frame_or_series, 5)
|
268 |
+
out = np.array([0])
|
269 |
+
errmsg = "the 'out' parameter is not supported"
|
270 |
+
|
271 |
+
with pytest.raises(ValueError, match=errmsg):
|
272 |
+
obj.max(out=out) # stat_function
|
273 |
+
with pytest.raises(ValueError, match=errmsg):
|
274 |
+
obj.var(out=out) # stat_function_ddof
|
275 |
+
with pytest.raises(ValueError, match=errmsg):
|
276 |
+
obj.sum(out=out) # cum_function
|
277 |
+
with pytest.raises(ValueError, match=errmsg):
|
278 |
+
obj.any(out=out) # logical_function
|
279 |
+
|
280 |
+
def test_truncate_out_of_bounds(self, frame_or_series):
|
281 |
+
# GH11382
|
282 |
+
|
283 |
+
# small
|
284 |
+
shape = [2000] + ([1] * (frame_or_series._AXIS_LEN - 1))
|
285 |
+
small = construct(frame_or_series, shape, dtype="int8", value=1)
|
286 |
+
tm.assert_equal(small.truncate(), small)
|
287 |
+
tm.assert_equal(small.truncate(before=0, after=3e3), small)
|
288 |
+
tm.assert_equal(small.truncate(before=-1, after=2e3), small)
|
289 |
+
|
290 |
+
# big
|
291 |
+
shape = [2_000_000] + ([1] * (frame_or_series._AXIS_LEN - 1))
|
292 |
+
big = construct(frame_or_series, shape, dtype="int8", value=1)
|
293 |
+
tm.assert_equal(big.truncate(), big)
|
294 |
+
tm.assert_equal(big.truncate(before=0, after=3e6), big)
|
295 |
+
tm.assert_equal(big.truncate(before=-1, after=2e6), big)
|
296 |
+
|
297 |
+
@pytest.mark.parametrize(
|
298 |
+
"func",
|
299 |
+
[copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
|
300 |
+
)
|
301 |
+
@pytest.mark.parametrize("shape", [0, 1, 2])
|
302 |
+
def test_copy_and_deepcopy(self, frame_or_series, shape, func):
|
303 |
+
# GH 15444
|
304 |
+
obj = construct(frame_or_series, shape)
|
305 |
+
obj_copy = func(obj)
|
306 |
+
assert obj_copy is not obj
|
307 |
+
tm.assert_equal(obj_copy, obj)
|
308 |
+
|
309 |
+
def test_data_deprecated(self, frame_or_series):
|
310 |
+
obj = frame_or_series()
|
311 |
+
msg = "(Series|DataFrame)._data is deprecated"
|
312 |
+
with tm.assert_produces_warning(DeprecationWarning, match=msg):
|
313 |
+
mgr = obj._data
|
314 |
+
assert mgr is obj._mgr
|
315 |
+
|
316 |
+
|
317 |
+
class TestNDFrame:
|
318 |
+
# tests that don't fit elsewhere
|
319 |
+
|
320 |
+
@pytest.mark.parametrize(
|
321 |
+
"ser",
|
322 |
+
[
|
323 |
+
Series(range(10), dtype=np.float64),
|
324 |
+
Series([str(i) for i in range(10)], dtype=object),
|
325 |
+
],
|
326 |
+
)
|
327 |
+
def test_squeeze_series_noop(self, ser):
|
328 |
+
# noop
|
329 |
+
tm.assert_series_equal(ser.squeeze(), ser)
|
330 |
+
|
331 |
+
def test_squeeze_frame_noop(self):
|
332 |
+
# noop
|
333 |
+
df = DataFrame(np.eye(2))
|
334 |
+
tm.assert_frame_equal(df.squeeze(), df)
|
335 |
+
|
336 |
+
def test_squeeze_frame_reindex(self):
|
337 |
+
# squeezing
|
338 |
+
df = DataFrame(
|
339 |
+
np.random.default_rng(2).standard_normal((10, 4)),
|
340 |
+
columns=Index(list("ABCD"), dtype=object),
|
341 |
+
index=date_range("2000-01-01", periods=10, freq="B"),
|
342 |
+
).reindex(columns=["A"])
|
343 |
+
tm.assert_series_equal(df.squeeze(), df["A"])
|
344 |
+
|
345 |
+
def test_squeeze_0_len_dim(self):
|
346 |
+
# don't fail with 0 length dimensions GH11229 & GH8999
|
347 |
+
empty_series = Series([], name="five", dtype=np.float64)
|
348 |
+
empty_frame = DataFrame([empty_series])
|
349 |
+
tm.assert_series_equal(empty_series, empty_series.squeeze())
|
350 |
+
tm.assert_series_equal(empty_series, empty_frame.squeeze())
|
351 |
+
|
352 |
+
def test_squeeze_axis(self):
|
353 |
+
# axis argument
|
354 |
+
df = DataFrame(
|
355 |
+
np.random.default_rng(2).standard_normal((1, 4)),
|
356 |
+
columns=Index(list("ABCD"), dtype=object),
|
357 |
+
index=date_range("2000-01-01", periods=1, freq="B"),
|
358 |
+
).iloc[:, :1]
|
359 |
+
assert df.shape == (1, 1)
|
360 |
+
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
|
361 |
+
tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
|
362 |
+
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
|
363 |
+
tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
|
364 |
+
assert df.squeeze() == df.iloc[0, 0]
|
365 |
+
msg = "No axis named 2 for object type DataFrame"
|
366 |
+
with pytest.raises(ValueError, match=msg):
|
367 |
+
df.squeeze(axis=2)
|
368 |
+
msg = "No axis named x for object type DataFrame"
|
369 |
+
with pytest.raises(ValueError, match=msg):
|
370 |
+
df.squeeze(axis="x")
|
371 |
+
|
372 |
+
def test_squeeze_axis_len_3(self):
|
373 |
+
df = DataFrame(
|
374 |
+
np.random.default_rng(2).standard_normal((3, 4)),
|
375 |
+
columns=Index(list("ABCD"), dtype=object),
|
376 |
+
index=date_range("2000-01-01", periods=3, freq="B"),
|
377 |
+
)
|
378 |
+
tm.assert_frame_equal(df.squeeze(axis=0), df)
|
379 |
+
|
380 |
+
def test_numpy_squeeze(self):
|
381 |
+
s = Series(range(2), dtype=np.float64)
|
382 |
+
tm.assert_series_equal(np.squeeze(s), s)
|
383 |
+
|
384 |
+
df = DataFrame(
|
385 |
+
np.random.default_rng(2).standard_normal((10, 4)),
|
386 |
+
columns=Index(list("ABCD"), dtype=object),
|
387 |
+
index=date_range("2000-01-01", periods=10, freq="B"),
|
388 |
+
).reindex(columns=["A"])
|
389 |
+
tm.assert_series_equal(np.squeeze(df), df["A"])
|
390 |
+
|
391 |
+
@pytest.mark.parametrize(
|
392 |
+
"ser",
|
393 |
+
[
|
394 |
+
Series(range(10), dtype=np.float64),
|
395 |
+
Series([str(i) for i in range(10)], dtype=object),
|
396 |
+
],
|
397 |
+
)
|
398 |
+
def test_transpose_series(self, ser):
|
399 |
+
# calls implementation in pandas/core/base.py
|
400 |
+
tm.assert_series_equal(ser.transpose(), ser)
|
401 |
+
|
402 |
+
def test_transpose_frame(self):
|
403 |
+
df = DataFrame(
|
404 |
+
np.random.default_rng(2).standard_normal((10, 4)),
|
405 |
+
columns=Index(list("ABCD"), dtype=object),
|
406 |
+
index=date_range("2000-01-01", periods=10, freq="B"),
|
407 |
+
)
|
408 |
+
tm.assert_frame_equal(df.transpose().transpose(), df)
|
409 |
+
|
410 |
+
def test_numpy_transpose(self, frame_or_series):
|
411 |
+
obj = DataFrame(
|
412 |
+
np.random.default_rng(2).standard_normal((10, 4)),
|
413 |
+
columns=Index(list("ABCD"), dtype=object),
|
414 |
+
index=date_range("2000-01-01", periods=10, freq="B"),
|
415 |
+
)
|
416 |
+
obj = tm.get_obj(obj, frame_or_series)
|
417 |
+
|
418 |
+
if frame_or_series is Series:
|
419 |
+
# 1D -> np.transpose is no-op
|
420 |
+
tm.assert_series_equal(np.transpose(obj), obj)
|
421 |
+
|
422 |
+
# round-trip preserved
|
423 |
+
tm.assert_equal(np.transpose(np.transpose(obj)), obj)
|
424 |
+
|
425 |
+
msg = "the 'axes' parameter is not supported"
|
426 |
+
with pytest.raises(ValueError, match=msg):
|
427 |
+
np.transpose(obj, axes=1)
|
428 |
+
|
429 |
+
@pytest.mark.parametrize(
|
430 |
+
"ser",
|
431 |
+
[
|
432 |
+
Series(range(10), dtype=np.float64),
|
433 |
+
Series([str(i) for i in range(10)], dtype=object),
|
434 |
+
],
|
435 |
+
)
|
436 |
+
def test_take_series(self, ser):
|
437 |
+
indices = [1, 5, -2, 6, 3, -1]
|
438 |
+
out = ser.take(indices)
|
439 |
+
expected = Series(
|
440 |
+
data=ser.values.take(indices),
|
441 |
+
index=ser.index.take(indices),
|
442 |
+
dtype=ser.dtype,
|
443 |
+
)
|
444 |
+
tm.assert_series_equal(out, expected)
|
445 |
+
|
446 |
+
def test_take_frame(self):
|
447 |
+
indices = [1, 5, -2, 6, 3, -1]
|
448 |
+
df = DataFrame(
|
449 |
+
np.random.default_rng(2).standard_normal((10, 4)),
|
450 |
+
columns=Index(list("ABCD"), dtype=object),
|
451 |
+
index=date_range("2000-01-01", periods=10, freq="B"),
|
452 |
+
)
|
453 |
+
out = df.take(indices)
|
454 |
+
expected = DataFrame(
|
455 |
+
data=df.values.take(indices, axis=0),
|
456 |
+
index=df.index.take(indices),
|
457 |
+
columns=df.columns,
|
458 |
+
)
|
459 |
+
tm.assert_frame_equal(out, expected)
|
460 |
+
|
461 |
+
def test_take_invalid_kwargs(self, frame_or_series):
|
462 |
+
indices = [-3, 2, 0, 1]
|
463 |
+
|
464 |
+
obj = DataFrame(range(5))
|
465 |
+
obj = tm.get_obj(obj, frame_or_series)
|
466 |
+
|
467 |
+
msg = r"take\(\) got an unexpected keyword argument 'foo'"
|
468 |
+
with pytest.raises(TypeError, match=msg):
|
469 |
+
obj.take(indices, foo=2)
|
470 |
+
|
471 |
+
msg = "the 'out' parameter is not supported"
|
472 |
+
with pytest.raises(ValueError, match=msg):
|
473 |
+
obj.take(indices, out=indices)
|
474 |
+
|
475 |
+
msg = "the 'mode' parameter is not supported"
|
476 |
+
with pytest.raises(ValueError, match=msg):
|
477 |
+
obj.take(indices, mode="clip")
|
478 |
+
|
479 |
+
def test_axis_classmethods(self, frame_or_series):
|
480 |
+
box = frame_or_series
|
481 |
+
obj = box(dtype=object)
|
482 |
+
values = box._AXIS_TO_AXIS_NUMBER.keys()
|
483 |
+
for v in values:
|
484 |
+
assert obj._get_axis_number(v) == box._get_axis_number(v)
|
485 |
+
assert obj._get_axis_name(v) == box._get_axis_name(v)
|
486 |
+
assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
|
487 |
+
|
488 |
+
def test_flags_identity(self, frame_or_series):
|
489 |
+
obj = Series([1, 2])
|
490 |
+
if frame_or_series is DataFrame:
|
491 |
+
obj = obj.to_frame()
|
492 |
+
|
493 |
+
assert obj.flags is obj.flags
|
494 |
+
obj2 = obj.copy()
|
495 |
+
assert obj2.flags is not obj.flags
|
496 |
+
|
497 |
+
def test_bool_dep(self) -> None:
|
498 |
+
# GH-51749
|
499 |
+
msg_warn = (
|
500 |
+
"DataFrame.bool is now deprecated and will be removed "
|
501 |
+
"in future version of pandas"
|
502 |
+
)
|
503 |
+
with tm.assert_produces_warning(FutureWarning, match=msg_warn):
|
504 |
+
DataFrame({"col": [False]}).bool()
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py
ADDED
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
from pandas.core.dtypes.missing import array_equivalent
|
4 |
+
|
5 |
+
import pandas as pd
|
6 |
+
|
7 |
+
|
8 |
+
# Fixtures
|
9 |
+
# ========
|
10 |
+
@pytest.fixture
|
11 |
+
def df():
|
12 |
+
"""DataFrame with columns 'L1', 'L2', and 'L3'"""
|
13 |
+
return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]})
|
14 |
+
|
15 |
+
|
16 |
+
@pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]])
|
17 |
+
def df_levels(request, df):
|
18 |
+
"""DataFrame with columns or index levels 'L1', 'L2', and 'L3'"""
|
19 |
+
levels = request.param
|
20 |
+
|
21 |
+
if levels:
|
22 |
+
df = df.set_index(levels)
|
23 |
+
|
24 |
+
return df
|
25 |
+
|
26 |
+
|
27 |
+
@pytest.fixture
|
28 |
+
def df_ambig(df):
|
29 |
+
"""DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3'"""
|
30 |
+
df = df.set_index(["L1", "L2"])
|
31 |
+
|
32 |
+
df["L1"] = df["L3"]
|
33 |
+
|
34 |
+
return df
|
35 |
+
|
36 |
+
|
37 |
+
@pytest.fixture
|
38 |
+
def df_duplabels(df):
|
39 |
+
"""DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2'"""
|
40 |
+
df = df.set_index(["L1"])
|
41 |
+
df = pd.concat([df, df["L2"]], axis=1)
|
42 |
+
|
43 |
+
return df
|
44 |
+
|
45 |
+
|
46 |
+
# Test is label/level reference
|
47 |
+
# =============================
|
48 |
+
def get_labels_levels(df_levels):
|
49 |
+
expected_labels = list(df_levels.columns)
|
50 |
+
expected_levels = [name for name in df_levels.index.names if name is not None]
|
51 |
+
return expected_labels, expected_levels
|
52 |
+
|
53 |
+
|
54 |
+
def assert_label_reference(frame, labels, axis):
|
55 |
+
for label in labels:
|
56 |
+
assert frame._is_label_reference(label, axis=axis)
|
57 |
+
assert not frame._is_level_reference(label, axis=axis)
|
58 |
+
assert frame._is_label_or_level_reference(label, axis=axis)
|
59 |
+
|
60 |
+
|
61 |
+
def assert_level_reference(frame, levels, axis):
|
62 |
+
for level in levels:
|
63 |
+
assert frame._is_level_reference(level, axis=axis)
|
64 |
+
assert not frame._is_label_reference(level, axis=axis)
|
65 |
+
assert frame._is_label_or_level_reference(level, axis=axis)
|
66 |
+
|
67 |
+
|
68 |
+
# DataFrame
|
69 |
+
# ---------
|
70 |
+
def test_is_level_or_label_reference_df_simple(df_levels, axis):
|
71 |
+
axis = df_levels._get_axis_number(axis)
|
72 |
+
# Compute expected labels and levels
|
73 |
+
expected_labels, expected_levels = get_labels_levels(df_levels)
|
74 |
+
|
75 |
+
# Transpose frame if axis == 1
|
76 |
+
if axis == 1:
|
77 |
+
df_levels = df_levels.T
|
78 |
+
|
79 |
+
# Perform checks
|
80 |
+
assert_level_reference(df_levels, expected_levels, axis=axis)
|
81 |
+
assert_label_reference(df_levels, expected_labels, axis=axis)
|
82 |
+
|
83 |
+
|
84 |
+
def test_is_level_reference_df_ambig(df_ambig, axis):
|
85 |
+
axis = df_ambig._get_axis_number(axis)
|
86 |
+
|
87 |
+
# Transpose frame if axis == 1
|
88 |
+
if axis == 1:
|
89 |
+
df_ambig = df_ambig.T
|
90 |
+
|
91 |
+
# df has both an on-axis level and off-axis label named L1
|
92 |
+
# Therefore L1 should reference the label, not the level
|
93 |
+
assert_label_reference(df_ambig, ["L1"], axis=axis)
|
94 |
+
|
95 |
+
# df has an on-axis level named L2 and it is not ambiguous
|
96 |
+
# Therefore L2 is an level reference
|
97 |
+
assert_level_reference(df_ambig, ["L2"], axis=axis)
|
98 |
+
|
99 |
+
# df has a column named L3 and it not an level reference
|
100 |
+
assert_label_reference(df_ambig, ["L3"], axis=axis)
|
101 |
+
|
102 |
+
|
103 |
+
# Series
|
104 |
+
# ------
|
105 |
+
def test_is_level_reference_series_simple_axis0(df):
|
106 |
+
# Make series with L1 as index
|
107 |
+
s = df.set_index("L1").L2
|
108 |
+
assert_level_reference(s, ["L1"], axis=0)
|
109 |
+
assert not s._is_level_reference("L2")
|
110 |
+
|
111 |
+
# Make series with L1 and L2 as index
|
112 |
+
s = df.set_index(["L1", "L2"]).L3
|
113 |
+
assert_level_reference(s, ["L1", "L2"], axis=0)
|
114 |
+
assert not s._is_level_reference("L3")
|
115 |
+
|
116 |
+
|
117 |
+
def test_is_level_reference_series_axis1_error(df):
|
118 |
+
# Make series with L1 as index
|
119 |
+
s = df.set_index("L1").L2
|
120 |
+
|
121 |
+
with pytest.raises(ValueError, match="No axis named 1"):
|
122 |
+
s._is_level_reference("L1", axis=1)
|
123 |
+
|
124 |
+
|
125 |
+
# Test _check_label_or_level_ambiguity_df
|
126 |
+
# =======================================
|
127 |
+
|
128 |
+
|
129 |
+
# DataFrame
|
130 |
+
# ---------
|
131 |
+
def test_check_label_or_level_ambiguity_df(df_ambig, axis):
|
132 |
+
axis = df_ambig._get_axis_number(axis)
|
133 |
+
# Transpose frame if axis == 1
|
134 |
+
if axis == 1:
|
135 |
+
df_ambig = df_ambig.T
|
136 |
+
msg = "'L1' is both a column level and an index label"
|
137 |
+
|
138 |
+
else:
|
139 |
+
msg = "'L1' is both an index level and a column label"
|
140 |
+
# df_ambig has both an on-axis level and off-axis label named L1
|
141 |
+
# Therefore, L1 is ambiguous.
|
142 |
+
with pytest.raises(ValueError, match=msg):
|
143 |
+
df_ambig._check_label_or_level_ambiguity("L1", axis=axis)
|
144 |
+
|
145 |
+
# df_ambig has an on-axis level named L2,, and it is not ambiguous.
|
146 |
+
df_ambig._check_label_or_level_ambiguity("L2", axis=axis)
|
147 |
+
|
148 |
+
# df_ambig has an off-axis label named L3, and it is not ambiguous
|
149 |
+
assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis)
|
150 |
+
|
151 |
+
|
152 |
+
# Series
|
153 |
+
# ------
|
154 |
+
def test_check_label_or_level_ambiguity_series(df):
|
155 |
+
# A series has no columns and therefore references are never ambiguous
|
156 |
+
|
157 |
+
# Make series with L1 as index
|
158 |
+
s = df.set_index("L1").L2
|
159 |
+
s._check_label_or_level_ambiguity("L1", axis=0)
|
160 |
+
s._check_label_or_level_ambiguity("L2", axis=0)
|
161 |
+
|
162 |
+
# Make series with L1 and L2 as index
|
163 |
+
s = df.set_index(["L1", "L2"]).L3
|
164 |
+
s._check_label_or_level_ambiguity("L1", axis=0)
|
165 |
+
s._check_label_or_level_ambiguity("L2", axis=0)
|
166 |
+
s._check_label_or_level_ambiguity("L3", axis=0)
|
167 |
+
|
168 |
+
|
169 |
+
def test_check_label_or_level_ambiguity_series_axis1_error(df):
|
170 |
+
# Make series with L1 as index
|
171 |
+
s = df.set_index("L1").L2
|
172 |
+
|
173 |
+
with pytest.raises(ValueError, match="No axis named 1"):
|
174 |
+
s._check_label_or_level_ambiguity("L1", axis=1)
|
175 |
+
|
176 |
+
|
177 |
+
# Test _get_label_or_level_values
|
178 |
+
# ===============================
|
179 |
+
def assert_label_values(frame, labels, axis):
|
180 |
+
axis = frame._get_axis_number(axis)
|
181 |
+
for label in labels:
|
182 |
+
if axis == 0:
|
183 |
+
expected = frame[label]._values
|
184 |
+
else:
|
185 |
+
expected = frame.loc[label]._values
|
186 |
+
|
187 |
+
result = frame._get_label_or_level_values(label, axis=axis)
|
188 |
+
assert array_equivalent(expected, result)
|
189 |
+
|
190 |
+
|
191 |
+
def assert_level_values(frame, levels, axis):
|
192 |
+
axis = frame._get_axis_number(axis)
|
193 |
+
for level in levels:
|
194 |
+
if axis == 0:
|
195 |
+
expected = frame.index.get_level_values(level=level)._values
|
196 |
+
else:
|
197 |
+
expected = frame.columns.get_level_values(level=level)._values
|
198 |
+
|
199 |
+
result = frame._get_label_or_level_values(level, axis=axis)
|
200 |
+
assert array_equivalent(expected, result)
|
201 |
+
|
202 |
+
|
203 |
+
# DataFrame
|
204 |
+
# ---------
|
205 |
+
def test_get_label_or_level_values_df_simple(df_levels, axis):
|
206 |
+
# Compute expected labels and levels
|
207 |
+
expected_labels, expected_levels = get_labels_levels(df_levels)
|
208 |
+
|
209 |
+
axis = df_levels._get_axis_number(axis)
|
210 |
+
# Transpose frame if axis == 1
|
211 |
+
if axis == 1:
|
212 |
+
df_levels = df_levels.T
|
213 |
+
|
214 |
+
# Perform checks
|
215 |
+
assert_label_values(df_levels, expected_labels, axis=axis)
|
216 |
+
assert_level_values(df_levels, expected_levels, axis=axis)
|
217 |
+
|
218 |
+
|
219 |
+
def test_get_label_or_level_values_df_ambig(df_ambig, axis):
|
220 |
+
axis = df_ambig._get_axis_number(axis)
|
221 |
+
# Transpose frame if axis == 1
|
222 |
+
if axis == 1:
|
223 |
+
df_ambig = df_ambig.T
|
224 |
+
|
225 |
+
# df has an on-axis level named L2, and it is not ambiguous.
|
226 |
+
assert_level_values(df_ambig, ["L2"], axis=axis)
|
227 |
+
|
228 |
+
# df has an off-axis label named L3, and it is not ambiguous.
|
229 |
+
assert_label_values(df_ambig, ["L3"], axis=axis)
|
230 |
+
|
231 |
+
|
232 |
+
def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
|
233 |
+
axis = df_duplabels._get_axis_number(axis)
|
234 |
+
# Transpose frame if axis == 1
|
235 |
+
if axis == 1:
|
236 |
+
df_duplabels = df_duplabels.T
|
237 |
+
|
238 |
+
# df has unambiguous level 'L1'
|
239 |
+
assert_level_values(df_duplabels, ["L1"], axis=axis)
|
240 |
+
|
241 |
+
# df has unique label 'L3'
|
242 |
+
assert_label_values(df_duplabels, ["L3"], axis=axis)
|
243 |
+
|
244 |
+
# df has duplicate labels 'L2'
|
245 |
+
if axis == 0:
|
246 |
+
expected_msg = "The column label 'L2' is not unique"
|
247 |
+
else:
|
248 |
+
expected_msg = "The index label 'L2' is not unique"
|
249 |
+
|
250 |
+
with pytest.raises(ValueError, match=expected_msg):
|
251 |
+
assert_label_values(df_duplabels, ["L2"], axis=axis)
|
252 |
+
|
253 |
+
|
254 |
+
# Series
|
255 |
+
# ------
|
256 |
+
def test_get_label_or_level_values_series_axis0(df):
|
257 |
+
# Make series with L1 as index
|
258 |
+
s = df.set_index("L1").L2
|
259 |
+
assert_level_values(s, ["L1"], axis=0)
|
260 |
+
|
261 |
+
# Make series with L1 and L2 as index
|
262 |
+
s = df.set_index(["L1", "L2"]).L3
|
263 |
+
assert_level_values(s, ["L1", "L2"], axis=0)
|
264 |
+
|
265 |
+
|
266 |
+
def test_get_label_or_level_values_series_axis1_error(df):
|
267 |
+
# Make series with L1 as index
|
268 |
+
s = df.set_index("L1").L2
|
269 |
+
|
270 |
+
with pytest.raises(ValueError, match="No axis named 1"):
|
271 |
+
s._get_label_or_level_values("L1", axis=1)
|
272 |
+
|
273 |
+
|
274 |
+
# Test _drop_labels_or_levels
|
275 |
+
# ===========================
|
276 |
+
def assert_labels_dropped(frame, labels, axis):
|
277 |
+
axis = frame._get_axis_number(axis)
|
278 |
+
for label in labels:
|
279 |
+
df_dropped = frame._drop_labels_or_levels(label, axis=axis)
|
280 |
+
|
281 |
+
if axis == 0:
|
282 |
+
assert label in frame.columns
|
283 |
+
assert label not in df_dropped.columns
|
284 |
+
else:
|
285 |
+
assert label in frame.index
|
286 |
+
assert label not in df_dropped.index
|
287 |
+
|
288 |
+
|
289 |
+
def assert_levels_dropped(frame, levels, axis):
|
290 |
+
axis = frame._get_axis_number(axis)
|
291 |
+
for level in levels:
|
292 |
+
df_dropped = frame._drop_labels_or_levels(level, axis=axis)
|
293 |
+
|
294 |
+
if axis == 0:
|
295 |
+
assert level in frame.index.names
|
296 |
+
assert level not in df_dropped.index.names
|
297 |
+
else:
|
298 |
+
assert level in frame.columns.names
|
299 |
+
assert level not in df_dropped.columns.names
|
300 |
+
|
301 |
+
|
302 |
+
# DataFrame
|
303 |
+
# ---------
|
304 |
+
def test_drop_labels_or_levels_df(df_levels, axis):
|
305 |
+
# Compute expected labels and levels
|
306 |
+
expected_labels, expected_levels = get_labels_levels(df_levels)
|
307 |
+
|
308 |
+
axis = df_levels._get_axis_number(axis)
|
309 |
+
# Transpose frame if axis == 1
|
310 |
+
if axis == 1:
|
311 |
+
df_levels = df_levels.T
|
312 |
+
|
313 |
+
# Perform checks
|
314 |
+
assert_labels_dropped(df_levels, expected_labels, axis=axis)
|
315 |
+
assert_levels_dropped(df_levels, expected_levels, axis=axis)
|
316 |
+
|
317 |
+
with pytest.raises(ValueError, match="not valid labels or levels"):
|
318 |
+
df_levels._drop_labels_or_levels("L4", axis=axis)
|
319 |
+
|
320 |
+
|
321 |
+
# Series
|
322 |
+
# ------
|
323 |
+
def test_drop_labels_or_levels_series(df):
|
324 |
+
# Make series with L1 as index
|
325 |
+
s = df.set_index("L1").L2
|
326 |
+
assert_levels_dropped(s, ["L1"], axis=0)
|
327 |
+
|
328 |
+
with pytest.raises(ValueError, match="not valid labels or levels"):
|
329 |
+
s._drop_labels_or_levels("L4", axis=0)
|
330 |
+
|
331 |
+
# Make series with L1 and L2 as index
|
332 |
+
s = df.set_index(["L1", "L2"]).L3
|
333 |
+
assert_levels_dropped(s, ["L1", "L2"], axis=0)
|
334 |
+
|
335 |
+
with pytest.raises(ValueError, match="not valid labels or levels"):
|
336 |
+
s._drop_labels_or_levels("L4", axis=0)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas import (
|
5 |
+
Categorical,
|
6 |
+
DataFrame,
|
7 |
+
MultiIndex,
|
8 |
+
Series,
|
9 |
+
date_range,
|
10 |
+
)
|
11 |
+
import pandas._testing as tm
|
12 |
+
|
13 |
+
pytest.importorskip("xarray")
|
14 |
+
|
15 |
+
|
16 |
+
class TestDataFrameToXArray:
|
17 |
+
@pytest.fixture
|
18 |
+
def df(self):
|
19 |
+
return DataFrame(
|
20 |
+
{
|
21 |
+
"a": list("abcd"),
|
22 |
+
"b": list(range(1, 5)),
|
23 |
+
"c": np.arange(3, 7).astype("u1"),
|
24 |
+
"d": np.arange(4.0, 8.0, dtype="float64"),
|
25 |
+
"e": [True, False, True, False],
|
26 |
+
"f": Categorical(list("abcd")),
|
27 |
+
"g": date_range("20130101", periods=4),
|
28 |
+
"h": date_range("20130101", periods=4, tz="US/Eastern"),
|
29 |
+
}
|
30 |
+
)
|
31 |
+
|
32 |
+
def test_to_xarray_index_types(self, index_flat, df, using_infer_string):
|
33 |
+
index = index_flat
|
34 |
+
# MultiIndex is tested in test_to_xarray_with_multiindex
|
35 |
+
if len(index) == 0:
|
36 |
+
pytest.skip("Test doesn't make sense for empty index")
|
37 |
+
|
38 |
+
from xarray import Dataset
|
39 |
+
|
40 |
+
df.index = index[:4]
|
41 |
+
df.index.name = "foo"
|
42 |
+
df.columns.name = "bar"
|
43 |
+
result = df.to_xarray()
|
44 |
+
assert result.sizes["foo"] == 4
|
45 |
+
assert len(result.coords) == 1
|
46 |
+
assert len(result.data_vars) == 8
|
47 |
+
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
|
48 |
+
assert isinstance(result, Dataset)
|
49 |
+
|
50 |
+
# idempotency
|
51 |
+
# datetimes w/tz are preserved
|
52 |
+
# column names are lost
|
53 |
+
expected = df.copy()
|
54 |
+
expected["f"] = expected["f"].astype(
|
55 |
+
object if not using_infer_string else "string[pyarrow_numpy]"
|
56 |
+
)
|
57 |
+
expected.columns.name = None
|
58 |
+
tm.assert_frame_equal(result.to_dataframe(), expected)
|
59 |
+
|
60 |
+
def test_to_xarray_empty(self, df):
|
61 |
+
from xarray import Dataset
|
62 |
+
|
63 |
+
df.index.name = "foo"
|
64 |
+
result = df[0:0].to_xarray()
|
65 |
+
assert result.sizes["foo"] == 0
|
66 |
+
assert isinstance(result, Dataset)
|
67 |
+
|
68 |
+
def test_to_xarray_with_multiindex(self, df, using_infer_string):
|
69 |
+
from xarray import Dataset
|
70 |
+
|
71 |
+
# MultiIndex
|
72 |
+
df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"])
|
73 |
+
result = df.to_xarray()
|
74 |
+
assert result.sizes["one"] == 1
|
75 |
+
assert result.sizes["two"] == 4
|
76 |
+
assert len(result.coords) == 2
|
77 |
+
assert len(result.data_vars) == 8
|
78 |
+
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
|
79 |
+
assert isinstance(result, Dataset)
|
80 |
+
|
81 |
+
result = result.to_dataframe()
|
82 |
+
expected = df.copy()
|
83 |
+
expected["f"] = expected["f"].astype(
|
84 |
+
object if not using_infer_string else "string[pyarrow_numpy]"
|
85 |
+
)
|
86 |
+
expected.columns.name = None
|
87 |
+
tm.assert_frame_equal(result, expected)
|
88 |
+
|
89 |
+
|
90 |
+
class TestSeriesToXArray:
|
91 |
+
def test_to_xarray_index_types(self, index_flat):
|
92 |
+
index = index_flat
|
93 |
+
# MultiIndex is tested in test_to_xarray_with_multiindex
|
94 |
+
|
95 |
+
from xarray import DataArray
|
96 |
+
|
97 |
+
ser = Series(range(len(index)), index=index, dtype="int64")
|
98 |
+
ser.index.name = "foo"
|
99 |
+
result = ser.to_xarray()
|
100 |
+
repr(result)
|
101 |
+
assert len(result) == len(index)
|
102 |
+
assert len(result.coords) == 1
|
103 |
+
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
|
104 |
+
assert isinstance(result, DataArray)
|
105 |
+
|
106 |
+
# idempotency
|
107 |
+
tm.assert_series_equal(result.to_series(), ser)
|
108 |
+
|
109 |
+
def test_to_xarray_empty(self):
|
110 |
+
from xarray import DataArray
|
111 |
+
|
112 |
+
ser = Series([], dtype=object)
|
113 |
+
ser.index.name = "foo"
|
114 |
+
result = ser.to_xarray()
|
115 |
+
assert len(result) == 0
|
116 |
+
assert len(result.coords) == 1
|
117 |
+
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
|
118 |
+
assert isinstance(result, DataArray)
|
119 |
+
|
120 |
+
def test_to_xarray_with_multiindex(self):
|
121 |
+
from xarray import DataArray
|
122 |
+
|
123 |
+
mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"])
|
124 |
+
ser = Series(range(6), dtype="int64", index=mi)
|
125 |
+
result = ser.to_xarray()
|
126 |
+
assert len(result) == 2
|
127 |
+
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
|
128 |
+
assert isinstance(result, DataArray)
|
129 |
+
res = result.to_series()
|
130 |
+
tm.assert_series_equal(res, ser)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_equals.cpython-310.pyc
ADDED
Binary file (6.08 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_indexing.cpython-310.pyc
ADDED
Binary file (1.4 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_is_monotonic.cpython-310.pyc
ADDED
Binary file (1.03 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_sort_values.cpython-310.pyc
ADDED
Binary file (7.63 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_value_counts.cpython-310.pyc
ADDED
Binary file (3.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_arithmetic.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Arithmetic tests specific to DatetimeIndex are generally about `freq`
|
2 |
+
# rentention or inference. Other arithmetic tests belong in
|
3 |
+
# tests/arithmetic/test_datetime64.py
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
from pandas import (
|
7 |
+
Timedelta,
|
8 |
+
TimedeltaIndex,
|
9 |
+
Timestamp,
|
10 |
+
date_range,
|
11 |
+
timedelta_range,
|
12 |
+
)
|
13 |
+
import pandas._testing as tm
|
14 |
+
|
15 |
+
|
16 |
+
class TestDatetimeIndexArithmetic:
|
17 |
+
def test_add_timedelta_preserves_freq(self):
|
18 |
+
# GH#37295 should hold for any DTI with freq=None or Tick freq
|
19 |
+
tz = "Canada/Eastern"
|
20 |
+
dti = date_range(
|
21 |
+
start=Timestamp("2019-03-26 00:00:00-0400", tz=tz),
|
22 |
+
end=Timestamp("2020-10-17 00:00:00-0400", tz=tz),
|
23 |
+
freq="D",
|
24 |
+
)
|
25 |
+
result = dti + Timedelta(days=1)
|
26 |
+
assert result.freq == dti.freq
|
27 |
+
|
28 |
+
def test_sub_datetime_preserves_freq(self, tz_naive_fixture):
|
29 |
+
# GH#48818
|
30 |
+
dti = date_range("2016-01-01", periods=12, tz=tz_naive_fixture)
|
31 |
+
|
32 |
+
res = dti - dti[0]
|
33 |
+
expected = timedelta_range("0 Days", "11 Days")
|
34 |
+
tm.assert_index_equal(res, expected)
|
35 |
+
assert res.freq == expected.freq
|
36 |
+
|
37 |
+
@pytest.mark.xfail(
|
38 |
+
reason="The inherited freq is incorrect bc dti.freq is incorrect "
|
39 |
+
"https://github.com/pandas-dev/pandas/pull/48818/files#r982793461"
|
40 |
+
)
|
41 |
+
def test_sub_datetime_preserves_freq_across_dst(self):
|
42 |
+
# GH#48818
|
43 |
+
ts = Timestamp("2016-03-11", tz="US/Pacific")
|
44 |
+
dti = date_range(ts, periods=4)
|
45 |
+
|
46 |
+
res = dti - dti[0]
|
47 |
+
expected = TimedeltaIndex(
|
48 |
+
[
|
49 |
+
Timedelta(days=0),
|
50 |
+
Timedelta(days=1),
|
51 |
+
Timedelta(days=2),
|
52 |
+
Timedelta(days=2, hours=23),
|
53 |
+
]
|
54 |
+
)
|
55 |
+
tm.assert_index_equal(res, expected)
|
56 |
+
assert res.freq == expected.freq
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_iter.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import dateutil.tz
|
2 |
+
import numpy as np
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from pandas import (
|
6 |
+
DatetimeIndex,
|
7 |
+
date_range,
|
8 |
+
to_datetime,
|
9 |
+
)
|
10 |
+
from pandas.core.arrays import datetimes
|
11 |
+
|
12 |
+
|
13 |
+
class TestDatetimeIndexIteration:
|
14 |
+
@pytest.mark.parametrize(
|
15 |
+
"tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)]
|
16 |
+
)
|
17 |
+
def test_iteration_preserves_nanoseconds(self, tz):
|
18 |
+
# GH#19603
|
19 |
+
index = DatetimeIndex(
|
20 |
+
["2018-02-08 15:00:00.168456358", "2018-02-08 15:00:00.168456359"], tz=tz
|
21 |
+
)
|
22 |
+
for i, ts in enumerate(index):
|
23 |
+
assert ts == index[i] # pylint: disable=unnecessary-list-index-lookup
|
24 |
+
|
25 |
+
def test_iter_readonly(self):
|
26 |
+
# GH#28055 ints_to_pydatetime with readonly array
|
27 |
+
arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")])
|
28 |
+
arr.setflags(write=False)
|
29 |
+
dti = to_datetime(arr)
|
30 |
+
list(dti)
|
31 |
+
|
32 |
+
def test_iteration_preserves_tz(self):
|
33 |
+
# see GH#8890
|
34 |
+
index = date_range("2012-01-01", periods=3, freq="h", tz="US/Eastern")
|
35 |
+
|
36 |
+
for i, ts in enumerate(index):
|
37 |
+
result = ts
|
38 |
+
expected = index[i] # pylint: disable=unnecessary-list-index-lookup
|
39 |
+
assert result == expected
|
40 |
+
|
41 |
+
def test_iteration_preserves_tz2(self):
|
42 |
+
index = date_range(
|
43 |
+
"2012-01-01", periods=3, freq="h", tz=dateutil.tz.tzoffset(None, -28800)
|
44 |
+
)
|
45 |
+
|
46 |
+
for i, ts in enumerate(index):
|
47 |
+
result = ts
|
48 |
+
expected = index[i] # pylint: disable=unnecessary-list-index-lookup
|
49 |
+
assert result._repr_base == expected._repr_base
|
50 |
+
assert result == expected
|
51 |
+
|
52 |
+
def test_iteration_preserves_tz3(self):
|
53 |
+
# GH#9100
|
54 |
+
index = DatetimeIndex(
|
55 |
+
["2014-12-01 03:32:39.987000-08:00", "2014-12-01 04:12:34.987000-08:00"]
|
56 |
+
)
|
57 |
+
for i, ts in enumerate(index):
|
58 |
+
result = ts
|
59 |
+
expected = index[i] # pylint: disable=unnecessary-list-index-lookup
|
60 |
+
assert result._repr_base == expected._repr_base
|
61 |
+
assert result == expected
|
62 |
+
|
63 |
+
@pytest.mark.parametrize("offset", [-5, -1, 0, 1])
|
64 |
+
def test_iteration_over_chunksize(self, offset, monkeypatch):
|
65 |
+
# GH#21012
|
66 |
+
chunksize = 5
|
67 |
+
index = date_range(
|
68 |
+
"2000-01-01 00:00:00", periods=chunksize - offset, freq="min"
|
69 |
+
)
|
70 |
+
num = 0
|
71 |
+
with monkeypatch.context() as m:
|
72 |
+
m.setattr(datetimes, "_ITER_CHUNKSIZE", chunksize)
|
73 |
+
for stamp in index:
|
74 |
+
assert index[num] == stamp
|
75 |
+
num += 1
|
76 |
+
assert num == len(index)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_join.py
ADDED
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from datetime import (
|
2 |
+
datetime,
|
3 |
+
timezone,
|
4 |
+
)
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import pytest
|
8 |
+
|
9 |
+
from pandas import (
|
10 |
+
DataFrame,
|
11 |
+
DatetimeIndex,
|
12 |
+
Index,
|
13 |
+
Timestamp,
|
14 |
+
date_range,
|
15 |
+
period_range,
|
16 |
+
to_datetime,
|
17 |
+
)
|
18 |
+
import pandas._testing as tm
|
19 |
+
|
20 |
+
from pandas.tseries.offsets import (
|
21 |
+
BDay,
|
22 |
+
BMonthEnd,
|
23 |
+
)
|
24 |
+
|
25 |
+
|
26 |
+
class TestJoin:
|
27 |
+
def test_does_not_convert_mixed_integer(self):
|
28 |
+
df = DataFrame(np.ones((3, 2)), columns=date_range("2020-01-01", periods=2))
|
29 |
+
cols = df.columns.join(df.index, how="outer")
|
30 |
+
joined = cols.join(df.columns)
|
31 |
+
assert cols.dtype == np.dtype("O")
|
32 |
+
assert cols.dtype == joined.dtype
|
33 |
+
tm.assert_numpy_array_equal(cols.values, joined.values)
|
34 |
+
|
35 |
+
def test_join_self(self, join_type):
|
36 |
+
index = date_range("1/1/2000", periods=10)
|
37 |
+
joined = index.join(index, how=join_type)
|
38 |
+
assert index is joined
|
39 |
+
|
40 |
+
def test_join_with_period_index(self, join_type):
|
41 |
+
df = DataFrame(
|
42 |
+
np.ones((10, 2)),
|
43 |
+
index=date_range("2020-01-01", periods=10),
|
44 |
+
columns=period_range("2020-01-01", periods=2),
|
45 |
+
)
|
46 |
+
s = df.iloc[:5, 0]
|
47 |
+
|
48 |
+
expected = df.columns.astype("O").join(s.index, how=join_type)
|
49 |
+
result = df.columns.join(s.index, how=join_type)
|
50 |
+
tm.assert_index_equal(expected, result)
|
51 |
+
|
52 |
+
def test_join_object_index(self):
|
53 |
+
rng = date_range("1/1/2000", periods=10)
|
54 |
+
idx = Index(["a", "b", "c", "d"])
|
55 |
+
|
56 |
+
result = rng.join(idx, how="outer")
|
57 |
+
assert isinstance(result[0], Timestamp)
|
58 |
+
|
59 |
+
def test_join_utc_convert(self, join_type):
|
60 |
+
rng = date_range("1/1/2011", periods=100, freq="h", tz="utc")
|
61 |
+
|
62 |
+
left = rng.tz_convert("US/Eastern")
|
63 |
+
right = rng.tz_convert("Europe/Berlin")
|
64 |
+
|
65 |
+
result = left.join(left[:-5], how=join_type)
|
66 |
+
assert isinstance(result, DatetimeIndex)
|
67 |
+
assert result.tz == left.tz
|
68 |
+
|
69 |
+
result = left.join(right[:-5], how=join_type)
|
70 |
+
assert isinstance(result, DatetimeIndex)
|
71 |
+
assert result.tz is timezone.utc
|
72 |
+
|
73 |
+
def test_datetimeindex_union_join_empty(self, sort):
|
74 |
+
dti = date_range(start="1/1/2001", end="2/1/2001", freq="D")
|
75 |
+
empty = Index([])
|
76 |
+
|
77 |
+
result = dti.union(empty, sort=sort)
|
78 |
+
expected = dti.astype("O")
|
79 |
+
tm.assert_index_equal(result, expected)
|
80 |
+
|
81 |
+
result = dti.join(empty)
|
82 |
+
assert isinstance(result, DatetimeIndex)
|
83 |
+
tm.assert_index_equal(result, dti)
|
84 |
+
|
85 |
+
def test_join_nonunique(self):
|
86 |
+
idx1 = to_datetime(["2012-11-06 16:00:11.477563", "2012-11-06 16:00:11.477563"])
|
87 |
+
idx2 = to_datetime(["2012-11-06 15:11:09.006507", "2012-11-06 15:11:09.006507"])
|
88 |
+
rs = idx1.join(idx2, how="outer")
|
89 |
+
assert rs.is_monotonic_increasing
|
90 |
+
|
91 |
+
@pytest.mark.parametrize("freq", ["B", "C"])
|
92 |
+
def test_outer_join(self, freq):
|
93 |
+
# should just behave as union
|
94 |
+
start, end = datetime(2009, 1, 1), datetime(2010, 1, 1)
|
95 |
+
rng = date_range(start=start, end=end, freq=freq)
|
96 |
+
|
97 |
+
# overlapping
|
98 |
+
left = rng[:10]
|
99 |
+
right = rng[5:10]
|
100 |
+
|
101 |
+
the_join = left.join(right, how="outer")
|
102 |
+
assert isinstance(the_join, DatetimeIndex)
|
103 |
+
|
104 |
+
# non-overlapping, gap in middle
|
105 |
+
left = rng[:5]
|
106 |
+
right = rng[10:]
|
107 |
+
|
108 |
+
the_join = left.join(right, how="outer")
|
109 |
+
assert isinstance(the_join, DatetimeIndex)
|
110 |
+
assert the_join.freq is None
|
111 |
+
|
112 |
+
# non-overlapping, no gap
|
113 |
+
left = rng[:5]
|
114 |
+
right = rng[5:10]
|
115 |
+
|
116 |
+
the_join = left.join(right, how="outer")
|
117 |
+
assert isinstance(the_join, DatetimeIndex)
|
118 |
+
|
119 |
+
# overlapping, but different offset
|
120 |
+
other = date_range(start, end, freq=BMonthEnd())
|
121 |
+
|
122 |
+
the_join = rng.join(other, how="outer")
|
123 |
+
assert isinstance(the_join, DatetimeIndex)
|
124 |
+
assert the_join.freq is None
|
125 |
+
|
126 |
+
def test_naive_aware_conflicts(self):
|
127 |
+
start, end = datetime(2009, 1, 1), datetime(2010, 1, 1)
|
128 |
+
naive = date_range(start, end, freq=BDay(), tz=None)
|
129 |
+
aware = date_range(start, end, freq=BDay(), tz="Asia/Hong_Kong")
|
130 |
+
|
131 |
+
msg = "tz-naive.*tz-aware"
|
132 |
+
with pytest.raises(TypeError, match=msg):
|
133 |
+
naive.join(aware)
|
134 |
+
|
135 |
+
with pytest.raises(TypeError, match=msg):
|
136 |
+
aware.join(naive)
|
137 |
+
|
138 |
+
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
|
139 |
+
def test_join_preserves_freq(self, tz):
|
140 |
+
# GH#32157
|
141 |
+
dti = date_range("2016-01-01", periods=10, tz=tz)
|
142 |
+
result = dti[:5].join(dti[5:], how="outer")
|
143 |
+
assert result.freq == dti.freq
|
144 |
+
tm.assert_index_equal(result, dti)
|
145 |
+
|
146 |
+
result = dti[:5].join(dti[6:], how="outer")
|
147 |
+
assert result.freq is None
|
148 |
+
expected = dti.delete(5)
|
149 |
+
tm.assert_index_equal(result, expected)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_ops.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from datetime import datetime
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from pandas import (
|
6 |
+
DatetimeIndex,
|
7 |
+
Index,
|
8 |
+
bdate_range,
|
9 |
+
date_range,
|
10 |
+
)
|
11 |
+
import pandas._testing as tm
|
12 |
+
|
13 |
+
|
14 |
+
class TestDatetimeIndexOps:
|
15 |
+
def test_infer_freq(self, freq_sample):
|
16 |
+
# GH 11018
|
17 |
+
idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10)
|
18 |
+
result = DatetimeIndex(idx.asi8, freq="infer")
|
19 |
+
tm.assert_index_equal(idx, result)
|
20 |
+
assert result.freq == freq_sample
|
21 |
+
|
22 |
+
|
23 |
+
@pytest.mark.parametrize("freq", ["B", "C"])
|
24 |
+
class TestBusinessDatetimeIndex:
|
25 |
+
@pytest.fixture
|
26 |
+
def rng(self, freq):
|
27 |
+
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
|
28 |
+
return bdate_range(START, END, freq=freq)
|
29 |
+
|
30 |
+
def test_comparison(self, rng):
|
31 |
+
d = rng[10]
|
32 |
+
|
33 |
+
comp = rng > d
|
34 |
+
assert comp[11]
|
35 |
+
assert not comp[9]
|
36 |
+
|
37 |
+
def test_copy(self, rng):
|
38 |
+
cp = rng.copy()
|
39 |
+
tm.assert_index_equal(cp, rng)
|
40 |
+
|
41 |
+
def test_identical(self, rng):
|
42 |
+
t1 = rng.copy()
|
43 |
+
t2 = rng.copy()
|
44 |
+
assert t1.identical(t2)
|
45 |
+
|
46 |
+
# name
|
47 |
+
t1 = t1.rename("foo")
|
48 |
+
assert t1.equals(t2)
|
49 |
+
assert not t1.identical(t2)
|
50 |
+
t2 = t2.rename("foo")
|
51 |
+
assert t1.identical(t2)
|
52 |
+
|
53 |
+
# freq
|
54 |
+
t2v = Index(t2.values)
|
55 |
+
assert t1.equals(t2v)
|
56 |
+
assert not t1.identical(t2v)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_pickle.py
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
from pandas import (
|
4 |
+
NaT,
|
5 |
+
date_range,
|
6 |
+
to_datetime,
|
7 |
+
)
|
8 |
+
import pandas._testing as tm
|
9 |
+
|
10 |
+
|
11 |
+
class TestPickle:
|
12 |
+
def test_pickle(self):
|
13 |
+
# GH#4606
|
14 |
+
idx = to_datetime(["2013-01-01", NaT, "2014-01-06"])
|
15 |
+
idx_p = tm.round_trip_pickle(idx)
|
16 |
+
assert idx_p[0] == idx[0]
|
17 |
+
assert idx_p[1] is NaT
|
18 |
+
assert idx_p[2] == idx[2]
|
19 |
+
|
20 |
+
def test_pickle_dont_infer_freq(self):
|
21 |
+
# GH#11002
|
22 |
+
# don't infer freq
|
23 |
+
idx = date_range("1750-1-1", "2050-1-1", freq="7D")
|
24 |
+
idx_p = tm.round_trip_pickle(idx)
|
25 |
+
tm.assert_index_equal(idx, idx_p)
|
26 |
+
|
27 |
+
def test_pickle_after_set_freq(self):
|
28 |
+
dti = date_range("20130101", periods=3, tz="US/Eastern", name="foo")
|
29 |
+
dti = dti._with_freq(None)
|
30 |
+
|
31 |
+
res = tm.round_trip_pickle(dti)
|
32 |
+
tm.assert_index_equal(res, dti)
|
33 |
+
|
34 |
+
def test_roundtrip_pickle_with_tz(self):
|
35 |
+
# GH#8367
|
36 |
+
# round-trip of timezone
|
37 |
+
index = date_range("20130101", periods=3, tz="US/Eastern", name="foo")
|
38 |
+
unpickled = tm.round_trip_pickle(index)
|
39 |
+
tm.assert_index_equal(index, unpickled)
|
40 |
+
|
41 |
+
@pytest.mark.parametrize("freq", ["B", "C"])
|
42 |
+
def test_pickle_unpickle(self, freq):
|
43 |
+
rng = date_range("2009-01-01", "2010-01-01", freq=freq)
|
44 |
+
unpickled = tm.round_trip_pickle(rng)
|
45 |
+
assert unpickled.freq == freq
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py
ADDED
@@ -0,0 +1,329 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Tests for DatetimeIndex methods behaving like their Timestamp counterparts
|
3 |
+
"""
|
4 |
+
|
5 |
+
import calendar
|
6 |
+
from datetime import (
|
7 |
+
date,
|
8 |
+
datetime,
|
9 |
+
time,
|
10 |
+
)
|
11 |
+
import locale
|
12 |
+
import unicodedata
|
13 |
+
|
14 |
+
import numpy as np
|
15 |
+
import pytest
|
16 |
+
|
17 |
+
from pandas._libs.tslibs import timezones
|
18 |
+
|
19 |
+
from pandas import (
|
20 |
+
DatetimeIndex,
|
21 |
+
Index,
|
22 |
+
NaT,
|
23 |
+
Timestamp,
|
24 |
+
date_range,
|
25 |
+
offsets,
|
26 |
+
)
|
27 |
+
import pandas._testing as tm
|
28 |
+
from pandas.core.arrays import DatetimeArray
|
29 |
+
|
30 |
+
|
31 |
+
class TestDatetimeIndexOps:
|
32 |
+
def test_dti_no_millisecond_field(self):
|
33 |
+
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
|
34 |
+
with pytest.raises(AttributeError, match=msg):
|
35 |
+
DatetimeIndex.millisecond
|
36 |
+
|
37 |
+
msg = "'DatetimeIndex' object has no attribute 'millisecond'"
|
38 |
+
with pytest.raises(AttributeError, match=msg):
|
39 |
+
DatetimeIndex([]).millisecond
|
40 |
+
|
41 |
+
def test_dti_time(self):
|
42 |
+
rng = date_range("1/1/2000", freq="12min", periods=10)
|
43 |
+
result = Index(rng).time
|
44 |
+
expected = [t.time() for t in rng]
|
45 |
+
assert (result == expected).all()
|
46 |
+
|
47 |
+
def test_dti_date(self):
|
48 |
+
rng = date_range("1/1/2000", freq="12h", periods=10)
|
49 |
+
result = Index(rng).date
|
50 |
+
expected = [t.date() for t in rng]
|
51 |
+
assert (result == expected).all()
|
52 |
+
|
53 |
+
@pytest.mark.parametrize(
|
54 |
+
"dtype",
|
55 |
+
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
|
56 |
+
)
|
57 |
+
def test_dti_date2(self, dtype):
|
58 |
+
# Regression test for GH#21230
|
59 |
+
expected = np.array([date(2018, 6, 4), NaT])
|
60 |
+
|
61 |
+
index = DatetimeIndex(["2018-06-04 10:00:00", NaT], dtype=dtype)
|
62 |
+
result = index.date
|
63 |
+
|
64 |
+
tm.assert_numpy_array_equal(result, expected)
|
65 |
+
|
66 |
+
@pytest.mark.parametrize(
|
67 |
+
"dtype",
|
68 |
+
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
|
69 |
+
)
|
70 |
+
def test_dti_time2(self, dtype):
|
71 |
+
# Regression test for GH#21267
|
72 |
+
expected = np.array([time(10, 20, 30), NaT])
|
73 |
+
|
74 |
+
index = DatetimeIndex(["2018-06-04 10:20:30", NaT], dtype=dtype)
|
75 |
+
result = index.time
|
76 |
+
|
77 |
+
tm.assert_numpy_array_equal(result, expected)
|
78 |
+
|
79 |
+
def test_dti_timetz(self, tz_naive_fixture):
|
80 |
+
# GH#21358
|
81 |
+
tz = timezones.maybe_get_tz(tz_naive_fixture)
|
82 |
+
|
83 |
+
expected = np.array([time(10, 20, 30, tzinfo=tz), NaT])
|
84 |
+
|
85 |
+
index = DatetimeIndex(["2018-06-04 10:20:30", NaT], tz=tz)
|
86 |
+
result = index.timetz
|
87 |
+
|
88 |
+
tm.assert_numpy_array_equal(result, expected)
|
89 |
+
|
90 |
+
@pytest.mark.parametrize(
|
91 |
+
"field",
|
92 |
+
[
|
93 |
+
"dayofweek",
|
94 |
+
"day_of_week",
|
95 |
+
"dayofyear",
|
96 |
+
"day_of_year",
|
97 |
+
"quarter",
|
98 |
+
"days_in_month",
|
99 |
+
"is_month_start",
|
100 |
+
"is_month_end",
|
101 |
+
"is_quarter_start",
|
102 |
+
"is_quarter_end",
|
103 |
+
"is_year_start",
|
104 |
+
"is_year_end",
|
105 |
+
],
|
106 |
+
)
|
107 |
+
def test_dti_timestamp_fields(self, field):
|
108 |
+
# extra fields from DatetimeIndex like quarter and week
|
109 |
+
idx = date_range("2020-01-01", periods=10)
|
110 |
+
expected = getattr(idx, field)[-1]
|
111 |
+
|
112 |
+
result = getattr(Timestamp(idx[-1]), field)
|
113 |
+
assert result == expected
|
114 |
+
|
115 |
+
def test_dti_nanosecond(self):
|
116 |
+
dti = DatetimeIndex(np.arange(10))
|
117 |
+
expected = Index(np.arange(10, dtype=np.int32))
|
118 |
+
|
119 |
+
tm.assert_index_equal(dti.nanosecond, expected)
|
120 |
+
|
121 |
+
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
|
122 |
+
def test_dti_hour_tzaware(self, prefix):
|
123 |
+
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
|
124 |
+
rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
|
125 |
+
assert (rng.hour == 0).all()
|
126 |
+
|
127 |
+
# a more unusual time zone, GH#1946
|
128 |
+
dr = date_range(
|
129 |
+
"2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan"
|
130 |
+
)
|
131 |
+
|
132 |
+
expected = Index(np.arange(10, dtype=np.int32))
|
133 |
+
tm.assert_index_equal(dr.hour, expected)
|
134 |
+
|
135 |
+
# GH#12806
|
136 |
+
# error: Unsupported operand types for + ("List[None]" and "List[str]")
|
137 |
+
@pytest.mark.parametrize(
|
138 |
+
"time_locale", [None] + tm.get_locales() # type: ignore[operator]
|
139 |
+
)
|
140 |
+
def test_day_name_month_name(self, time_locale):
|
141 |
+
# Test Monday -> Sunday and January -> December, in that sequence
|
142 |
+
if time_locale is None:
|
143 |
+
# If the time_locale is None, day-name and month_name should
|
144 |
+
# return the english attributes
|
145 |
+
expected_days = [
|
146 |
+
"Monday",
|
147 |
+
"Tuesday",
|
148 |
+
"Wednesday",
|
149 |
+
"Thursday",
|
150 |
+
"Friday",
|
151 |
+
"Saturday",
|
152 |
+
"Sunday",
|
153 |
+
]
|
154 |
+
expected_months = [
|
155 |
+
"January",
|
156 |
+
"February",
|
157 |
+
"March",
|
158 |
+
"April",
|
159 |
+
"May",
|
160 |
+
"June",
|
161 |
+
"July",
|
162 |
+
"August",
|
163 |
+
"September",
|
164 |
+
"October",
|
165 |
+
"November",
|
166 |
+
"December",
|
167 |
+
]
|
168 |
+
else:
|
169 |
+
with tm.set_locale(time_locale, locale.LC_TIME):
|
170 |
+
expected_days = calendar.day_name[:]
|
171 |
+
expected_months = calendar.month_name[1:]
|
172 |
+
|
173 |
+
# GH#11128
|
174 |
+
dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
|
175 |
+
english_days = [
|
176 |
+
"Monday",
|
177 |
+
"Tuesday",
|
178 |
+
"Wednesday",
|
179 |
+
"Thursday",
|
180 |
+
"Friday",
|
181 |
+
"Saturday",
|
182 |
+
"Sunday",
|
183 |
+
]
|
184 |
+
for day, name, eng_name in zip(range(4, 11), expected_days, english_days):
|
185 |
+
name = name.capitalize()
|
186 |
+
assert dti.day_name(locale=time_locale)[day] == name
|
187 |
+
assert dti.day_name(locale=None)[day] == eng_name
|
188 |
+
ts = Timestamp(datetime(2016, 4, day))
|
189 |
+
assert ts.day_name(locale=time_locale) == name
|
190 |
+
dti = dti.append(DatetimeIndex([NaT]))
|
191 |
+
assert np.isnan(dti.day_name(locale=time_locale)[-1])
|
192 |
+
ts = Timestamp(NaT)
|
193 |
+
assert np.isnan(ts.day_name(locale=time_locale))
|
194 |
+
|
195 |
+
# GH#12805
|
196 |
+
dti = date_range(freq="ME", start="2012", end="2013")
|
197 |
+
result = dti.month_name(locale=time_locale)
|
198 |
+
expected = Index([month.capitalize() for month in expected_months])
|
199 |
+
|
200 |
+
# work around different normalization schemes GH#22342
|
201 |
+
result = result.str.normalize("NFD")
|
202 |
+
expected = expected.str.normalize("NFD")
|
203 |
+
|
204 |
+
tm.assert_index_equal(result, expected)
|
205 |
+
|
206 |
+
for item, expected in zip(dti, expected_months):
|
207 |
+
result = item.month_name(locale=time_locale)
|
208 |
+
expected = expected.capitalize()
|
209 |
+
|
210 |
+
result = unicodedata.normalize("NFD", result)
|
211 |
+
expected = unicodedata.normalize("NFD", result)
|
212 |
+
|
213 |
+
assert result == expected
|
214 |
+
dti = dti.append(DatetimeIndex([NaT]))
|
215 |
+
assert np.isnan(dti.month_name(locale=time_locale)[-1])
|
216 |
+
|
217 |
+
def test_dti_week(self):
|
218 |
+
# GH#6538: Check that DatetimeIndex and its TimeStamp elements
|
219 |
+
# return the same weekofyear accessor close to new year w/ tz
|
220 |
+
dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
|
221 |
+
dates = DatetimeIndex(dates, tz="Europe/Brussels")
|
222 |
+
expected = [52, 1, 1]
|
223 |
+
assert dates.isocalendar().week.tolist() == expected
|
224 |
+
assert [d.weekofyear for d in dates] == expected
|
225 |
+
|
226 |
+
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
|
227 |
+
def test_dti_fields(self, tz):
|
228 |
+
# GH#13303
|
229 |
+
dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365, tz=tz)
|
230 |
+
assert dti.year[0] == 1998
|
231 |
+
assert dti.month[0] == 1
|
232 |
+
assert dti.day[0] == 1
|
233 |
+
assert dti.hour[0] == 0
|
234 |
+
assert dti.minute[0] == 0
|
235 |
+
assert dti.second[0] == 0
|
236 |
+
assert dti.microsecond[0] == 0
|
237 |
+
assert dti.dayofweek[0] == 3
|
238 |
+
|
239 |
+
assert dti.dayofyear[0] == 1
|
240 |
+
assert dti.dayofyear[120] == 121
|
241 |
+
|
242 |
+
assert dti.isocalendar().week.iloc[0] == 1
|
243 |
+
assert dti.isocalendar().week.iloc[120] == 18
|
244 |
+
|
245 |
+
assert dti.quarter[0] == 1
|
246 |
+
assert dti.quarter[120] == 2
|
247 |
+
|
248 |
+
assert dti.days_in_month[0] == 31
|
249 |
+
assert dti.days_in_month[90] == 30
|
250 |
+
|
251 |
+
assert dti.is_month_start[0]
|
252 |
+
assert not dti.is_month_start[1]
|
253 |
+
assert dti.is_month_start[31]
|
254 |
+
assert dti.is_quarter_start[0]
|
255 |
+
assert dti.is_quarter_start[90]
|
256 |
+
assert dti.is_year_start[0]
|
257 |
+
assert not dti.is_year_start[364]
|
258 |
+
assert not dti.is_month_end[0]
|
259 |
+
assert dti.is_month_end[30]
|
260 |
+
assert not dti.is_month_end[31]
|
261 |
+
assert dti.is_month_end[364]
|
262 |
+
assert not dti.is_quarter_end[0]
|
263 |
+
assert not dti.is_quarter_end[30]
|
264 |
+
assert dti.is_quarter_end[89]
|
265 |
+
assert dti.is_quarter_end[364]
|
266 |
+
assert not dti.is_year_end[0]
|
267 |
+
assert dti.is_year_end[364]
|
268 |
+
|
269 |
+
assert len(dti.year) == 365
|
270 |
+
assert len(dti.month) == 365
|
271 |
+
assert len(dti.day) == 365
|
272 |
+
assert len(dti.hour) == 365
|
273 |
+
assert len(dti.minute) == 365
|
274 |
+
assert len(dti.second) == 365
|
275 |
+
assert len(dti.microsecond) == 365
|
276 |
+
assert len(dti.dayofweek) == 365
|
277 |
+
assert len(dti.dayofyear) == 365
|
278 |
+
assert len(dti.isocalendar()) == 365
|
279 |
+
assert len(dti.quarter) == 365
|
280 |
+
assert len(dti.is_month_start) == 365
|
281 |
+
assert len(dti.is_month_end) == 365
|
282 |
+
assert len(dti.is_quarter_start) == 365
|
283 |
+
assert len(dti.is_quarter_end) == 365
|
284 |
+
assert len(dti.is_year_start) == 365
|
285 |
+
assert len(dti.is_year_end) == 365
|
286 |
+
|
287 |
+
dti.name = "name"
|
288 |
+
|
289 |
+
# non boolean accessors -> return Index
|
290 |
+
for accessor in DatetimeArray._field_ops:
|
291 |
+
res = getattr(dti, accessor)
|
292 |
+
assert len(res) == 365
|
293 |
+
assert isinstance(res, Index)
|
294 |
+
assert res.name == "name"
|
295 |
+
|
296 |
+
# boolean accessors -> return array
|
297 |
+
for accessor in DatetimeArray._bool_ops:
|
298 |
+
res = getattr(dti, accessor)
|
299 |
+
assert len(res) == 365
|
300 |
+
assert isinstance(res, np.ndarray)
|
301 |
+
|
302 |
+
# test boolean indexing
|
303 |
+
res = dti[dti.is_quarter_start]
|
304 |
+
exp = dti[[0, 90, 181, 273]]
|
305 |
+
tm.assert_index_equal(res, exp)
|
306 |
+
res = dti[dti.is_leap_year]
|
307 |
+
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name").as_unit("ns")
|
308 |
+
tm.assert_index_equal(res, exp)
|
309 |
+
|
310 |
+
def test_dti_is_year_quarter_start(self):
|
311 |
+
dti = date_range(freq="BQE-FEB", start=datetime(1998, 1, 1), periods=4)
|
312 |
+
|
313 |
+
assert sum(dti.is_quarter_start) == 0
|
314 |
+
assert sum(dti.is_quarter_end) == 4
|
315 |
+
assert sum(dti.is_year_start) == 0
|
316 |
+
assert sum(dti.is_year_end) == 1
|
317 |
+
|
318 |
+
def test_dti_is_month_start(self):
|
319 |
+
dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
|
320 |
+
|
321 |
+
assert dti.is_month_start[0] == 1
|
322 |
+
|
323 |
+
def test_dti_is_month_start_custom(self):
|
324 |
+
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay,
|
325 |
+
bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu")
|
326 |
+
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
|
327 |
+
msg = "Custom business days is not supported by is_month_start"
|
328 |
+
with pytest.raises(ValueError, match=msg):
|
329 |
+
dti.is_month_start
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__init__.py
ADDED
File without changes
|