Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_numba.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_str.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/common.py +7 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply.py +1733 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py +113 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_frame_transform.py +264 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_invalid_arg.py +361 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_numba.py +118 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply.py +701 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply_relabeling.py +39 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_series_transform.py +84 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_str.py +326 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/conftest.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_arrow.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_categorical.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_datetime.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_extension.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_interval.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_masked.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_numpy.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_period.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_sparse.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_string.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__init__.py +6 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/array.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/test_array_with_attr.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/array.py +89 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py +33 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__init__.py +131 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/io.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__init__.py
ADDED
|
File without changes
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/common.cpython-310.pyc
ADDED
|
Binary file (556 Bytes). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply.cpython-310.pyc
ADDED
|
Binary file (57.2 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_numba.cpython-310.pyc
ADDED
|
Binary file (5.3 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_str.cpython-310.pyc
ADDED
|
Binary file (7.24 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/common.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pandas.core.groupby.base import transformation_kernels
|
| 2 |
+
|
| 3 |
+
# There is no Series.cumcount or DataFrame.cumcount
|
| 4 |
+
series_transform_kernels = [
|
| 5 |
+
x for x in sorted(transformation_kernels) if x != "cumcount"
|
| 6 |
+
]
|
| 7 |
+
frame_transform_kernels = [x for x in sorted(transformation_kernels) if x != "cumcount"]
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply.py
ADDED
|
@@ -0,0 +1,1733 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from pandas.core.dtypes.dtypes import CategoricalDtype
|
| 8 |
+
|
| 9 |
+
import pandas as pd
|
| 10 |
+
from pandas import (
|
| 11 |
+
DataFrame,
|
| 12 |
+
MultiIndex,
|
| 13 |
+
Series,
|
| 14 |
+
Timestamp,
|
| 15 |
+
date_range,
|
| 16 |
+
)
|
| 17 |
+
import pandas._testing as tm
|
| 18 |
+
from pandas.tests.frame.common import zip_frames
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@pytest.fixture
|
| 22 |
+
def int_frame_const_col():
|
| 23 |
+
"""
|
| 24 |
+
Fixture for DataFrame of ints which are constant per column
|
| 25 |
+
|
| 26 |
+
Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3]
|
| 27 |
+
"""
|
| 28 |
+
df = DataFrame(
|
| 29 |
+
np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
|
| 30 |
+
columns=["A", "B", "C"],
|
| 31 |
+
)
|
| 32 |
+
return df
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@pytest.fixture(params=["python", pytest.param("numba", marks=pytest.mark.single_cpu)])
|
| 36 |
+
def engine(request):
|
| 37 |
+
if request.param == "numba":
|
| 38 |
+
pytest.importorskip("numba")
|
| 39 |
+
return request.param
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def test_apply(float_frame, engine, request):
|
| 43 |
+
if engine == "numba":
|
| 44 |
+
mark = pytest.mark.xfail(reason="numba engine not supporting numpy ufunc yet")
|
| 45 |
+
request.node.add_marker(mark)
|
| 46 |
+
with np.errstate(all="ignore"):
|
| 47 |
+
# ufunc
|
| 48 |
+
result = np.sqrt(float_frame["A"])
|
| 49 |
+
expected = float_frame.apply(np.sqrt, engine=engine)["A"]
|
| 50 |
+
tm.assert_series_equal(result, expected)
|
| 51 |
+
|
| 52 |
+
# aggregator
|
| 53 |
+
result = float_frame.apply(np.mean, engine=engine)["A"]
|
| 54 |
+
expected = np.mean(float_frame["A"])
|
| 55 |
+
assert result == expected
|
| 56 |
+
|
| 57 |
+
d = float_frame.index[0]
|
| 58 |
+
result = float_frame.apply(np.mean, axis=1, engine=engine)
|
| 59 |
+
expected = np.mean(float_frame.xs(d))
|
| 60 |
+
assert result[d] == expected
|
| 61 |
+
assert result.index is float_frame.index
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
| 65 |
+
@pytest.mark.parametrize("raw", [True, False])
|
| 66 |
+
def test_apply_args(float_frame, axis, raw, engine, request):
|
| 67 |
+
if engine == "numba":
|
| 68 |
+
mark = pytest.mark.xfail(reason="numba engine doesn't support args")
|
| 69 |
+
request.node.add_marker(mark)
|
| 70 |
+
result = float_frame.apply(
|
| 71 |
+
lambda x, y: x + y, axis, args=(1,), raw=raw, engine=engine
|
| 72 |
+
)
|
| 73 |
+
expected = float_frame + 1
|
| 74 |
+
tm.assert_frame_equal(result, expected)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def test_apply_categorical_func():
|
| 78 |
+
# GH 9573
|
| 79 |
+
df = DataFrame({"c0": ["A", "A", "B", "B"], "c1": ["C", "C", "D", "D"]})
|
| 80 |
+
result = df.apply(lambda ts: ts.astype("category"))
|
| 81 |
+
|
| 82 |
+
assert result.shape == (4, 2)
|
| 83 |
+
assert isinstance(result["c0"].dtype, CategoricalDtype)
|
| 84 |
+
assert isinstance(result["c1"].dtype, CategoricalDtype)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def test_apply_axis1_with_ea():
|
| 88 |
+
# GH#36785
|
| 89 |
+
expected = DataFrame({"A": [Timestamp("2013-01-01", tz="UTC")]})
|
| 90 |
+
result = expected.apply(lambda x: x, axis=1)
|
| 91 |
+
tm.assert_frame_equal(result, expected)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@pytest.mark.parametrize(
|
| 95 |
+
"data, dtype",
|
| 96 |
+
[(1, None), (1, CategoricalDtype([1])), (Timestamp("2013-01-01", tz="UTC"), None)],
|
| 97 |
+
)
|
| 98 |
+
def test_agg_axis1_duplicate_index(data, dtype):
|
| 99 |
+
# GH 42380
|
| 100 |
+
expected = DataFrame([[data], [data]], index=["a", "a"], dtype=dtype)
|
| 101 |
+
result = expected.agg(lambda x: x, axis=1)
|
| 102 |
+
tm.assert_frame_equal(result, expected)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def test_apply_mixed_datetimelike():
|
| 106 |
+
# mixed datetimelike
|
| 107 |
+
# GH 7778
|
| 108 |
+
expected = DataFrame(
|
| 109 |
+
{
|
| 110 |
+
"A": date_range("20130101", periods=3),
|
| 111 |
+
"B": pd.to_timedelta(np.arange(3), unit="s"),
|
| 112 |
+
}
|
| 113 |
+
)
|
| 114 |
+
result = expected.apply(lambda x: x, axis=1)
|
| 115 |
+
tm.assert_frame_equal(result, expected)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@pytest.mark.parametrize("func", [np.sqrt, np.mean])
|
| 119 |
+
def test_apply_empty(func, engine):
|
| 120 |
+
# empty
|
| 121 |
+
empty_frame = DataFrame()
|
| 122 |
+
|
| 123 |
+
result = empty_frame.apply(func, engine=engine)
|
| 124 |
+
assert result.empty
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def test_apply_float_frame(float_frame, engine):
|
| 128 |
+
no_rows = float_frame[:0]
|
| 129 |
+
result = no_rows.apply(lambda x: x.mean(), engine=engine)
|
| 130 |
+
expected = Series(np.nan, index=float_frame.columns)
|
| 131 |
+
tm.assert_series_equal(result, expected)
|
| 132 |
+
|
| 133 |
+
no_cols = float_frame.loc[:, []]
|
| 134 |
+
result = no_cols.apply(lambda x: x.mean(), axis=1, engine=engine)
|
| 135 |
+
expected = Series(np.nan, index=float_frame.index)
|
| 136 |
+
tm.assert_series_equal(result, expected)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def test_apply_empty_except_index(engine):
|
| 140 |
+
# GH 2476
|
| 141 |
+
expected = DataFrame(index=["a"])
|
| 142 |
+
result = expected.apply(lambda x: x["a"], axis=1, engine=engine)
|
| 143 |
+
tm.assert_frame_equal(result, expected)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def test_apply_with_reduce_empty():
|
| 147 |
+
# reduce with an empty DataFrame
|
| 148 |
+
empty_frame = DataFrame()
|
| 149 |
+
|
| 150 |
+
x = []
|
| 151 |
+
result = empty_frame.apply(x.append, axis=1, result_type="expand")
|
| 152 |
+
tm.assert_frame_equal(result, empty_frame)
|
| 153 |
+
result = empty_frame.apply(x.append, axis=1, result_type="reduce")
|
| 154 |
+
expected = Series([], dtype=np.float64)
|
| 155 |
+
tm.assert_series_equal(result, expected)
|
| 156 |
+
|
| 157 |
+
empty_with_cols = DataFrame(columns=["a", "b", "c"])
|
| 158 |
+
result = empty_with_cols.apply(x.append, axis=1, result_type="expand")
|
| 159 |
+
tm.assert_frame_equal(result, empty_with_cols)
|
| 160 |
+
result = empty_with_cols.apply(x.append, axis=1, result_type="reduce")
|
| 161 |
+
expected = Series([], dtype=np.float64)
|
| 162 |
+
tm.assert_series_equal(result, expected)
|
| 163 |
+
|
| 164 |
+
# Ensure that x.append hasn't been called
|
| 165 |
+
assert x == []
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
@pytest.mark.parametrize("func", ["sum", "prod", "any", "all"])
|
| 169 |
+
def test_apply_funcs_over_empty(func):
|
| 170 |
+
# GH 28213
|
| 171 |
+
df = DataFrame(columns=["a", "b", "c"])
|
| 172 |
+
|
| 173 |
+
result = df.apply(getattr(np, func))
|
| 174 |
+
expected = getattr(df, func)()
|
| 175 |
+
if func in ("sum", "prod"):
|
| 176 |
+
expected = expected.astype(float)
|
| 177 |
+
tm.assert_series_equal(result, expected)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def test_nunique_empty():
|
| 181 |
+
# GH 28213
|
| 182 |
+
df = DataFrame(columns=["a", "b", "c"])
|
| 183 |
+
|
| 184 |
+
result = df.nunique()
|
| 185 |
+
expected = Series(0, index=df.columns)
|
| 186 |
+
tm.assert_series_equal(result, expected)
|
| 187 |
+
|
| 188 |
+
result = df.T.nunique()
|
| 189 |
+
expected = Series([], dtype=np.float64)
|
| 190 |
+
tm.assert_series_equal(result, expected)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def test_apply_standard_nonunique():
|
| 194 |
+
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
|
| 195 |
+
|
| 196 |
+
result = df.apply(lambda s: s[0], axis=1)
|
| 197 |
+
expected = Series([1, 4, 7], ["a", "a", "c"])
|
| 198 |
+
tm.assert_series_equal(result, expected)
|
| 199 |
+
|
| 200 |
+
result = df.T.apply(lambda s: s[0], axis=0)
|
| 201 |
+
tm.assert_series_equal(result, expected)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def test_apply_broadcast_scalars(float_frame):
|
| 205 |
+
# scalars
|
| 206 |
+
result = float_frame.apply(np.mean, result_type="broadcast")
|
| 207 |
+
expected = DataFrame([float_frame.mean()], index=float_frame.index)
|
| 208 |
+
tm.assert_frame_equal(result, expected)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def test_apply_broadcast_scalars_axis1(float_frame):
|
| 212 |
+
result = float_frame.apply(np.mean, axis=1, result_type="broadcast")
|
| 213 |
+
m = float_frame.mean(axis=1)
|
| 214 |
+
expected = DataFrame({c: m for c in float_frame.columns})
|
| 215 |
+
tm.assert_frame_equal(result, expected)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def test_apply_broadcast_lists_columns(float_frame):
|
| 219 |
+
# lists
|
| 220 |
+
result = float_frame.apply(
|
| 221 |
+
lambda x: list(range(len(float_frame.columns))),
|
| 222 |
+
axis=1,
|
| 223 |
+
result_type="broadcast",
|
| 224 |
+
)
|
| 225 |
+
m = list(range(len(float_frame.columns)))
|
| 226 |
+
expected = DataFrame(
|
| 227 |
+
[m] * len(float_frame.index),
|
| 228 |
+
dtype="float64",
|
| 229 |
+
index=float_frame.index,
|
| 230 |
+
columns=float_frame.columns,
|
| 231 |
+
)
|
| 232 |
+
tm.assert_frame_equal(result, expected)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def test_apply_broadcast_lists_index(float_frame):
|
| 236 |
+
result = float_frame.apply(
|
| 237 |
+
lambda x: list(range(len(float_frame.index))), result_type="broadcast"
|
| 238 |
+
)
|
| 239 |
+
m = list(range(len(float_frame.index)))
|
| 240 |
+
expected = DataFrame(
|
| 241 |
+
{c: m for c in float_frame.columns},
|
| 242 |
+
dtype="float64",
|
| 243 |
+
index=float_frame.index,
|
| 244 |
+
)
|
| 245 |
+
tm.assert_frame_equal(result, expected)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def test_apply_broadcast_list_lambda_func(int_frame_const_col):
|
| 249 |
+
# preserve columns
|
| 250 |
+
df = int_frame_const_col
|
| 251 |
+
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="broadcast")
|
| 252 |
+
tm.assert_frame_equal(result, df)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def test_apply_broadcast_series_lambda_func(int_frame_const_col):
|
| 256 |
+
df = int_frame_const_col
|
| 257 |
+
result = df.apply(
|
| 258 |
+
lambda x: Series([1, 2, 3], index=list("abc")),
|
| 259 |
+
axis=1,
|
| 260 |
+
result_type="broadcast",
|
| 261 |
+
)
|
| 262 |
+
expected = df.copy()
|
| 263 |
+
tm.assert_frame_equal(result, expected)
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
| 267 |
+
def test_apply_raw_float_frame(float_frame, axis, engine):
|
| 268 |
+
if engine == "numba":
|
| 269 |
+
pytest.skip("numba can't handle when UDF returns None.")
|
| 270 |
+
|
| 271 |
+
def _assert_raw(x):
|
| 272 |
+
assert isinstance(x, np.ndarray)
|
| 273 |
+
assert x.ndim == 1
|
| 274 |
+
|
| 275 |
+
float_frame.apply(_assert_raw, axis=axis, engine=engine, raw=True)
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
| 279 |
+
def test_apply_raw_float_frame_lambda(float_frame, axis, engine):
|
| 280 |
+
result = float_frame.apply(np.mean, axis=axis, engine=engine, raw=True)
|
| 281 |
+
expected = float_frame.apply(lambda x: x.values.mean(), axis=axis)
|
| 282 |
+
tm.assert_series_equal(result, expected)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def test_apply_raw_float_frame_no_reduction(float_frame, engine):
|
| 286 |
+
# no reduction
|
| 287 |
+
result = float_frame.apply(lambda x: x * 2, engine=engine, raw=True)
|
| 288 |
+
expected = float_frame * 2
|
| 289 |
+
tm.assert_frame_equal(result, expected)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
| 293 |
+
def test_apply_raw_mixed_type_frame(axis, engine):
|
| 294 |
+
if engine == "numba":
|
| 295 |
+
pytest.skip("isinstance check doesn't work with numba")
|
| 296 |
+
|
| 297 |
+
def _assert_raw(x):
|
| 298 |
+
assert isinstance(x, np.ndarray)
|
| 299 |
+
assert x.ndim == 1
|
| 300 |
+
|
| 301 |
+
# Mixed dtype (GH-32423)
|
| 302 |
+
df = DataFrame(
|
| 303 |
+
{
|
| 304 |
+
"a": 1.0,
|
| 305 |
+
"b": 2,
|
| 306 |
+
"c": "foo",
|
| 307 |
+
"float32": np.array([1.0] * 10, dtype="float32"),
|
| 308 |
+
"int32": np.array([1] * 10, dtype="int32"),
|
| 309 |
+
},
|
| 310 |
+
index=np.arange(10),
|
| 311 |
+
)
|
| 312 |
+
df.apply(_assert_raw, axis=axis, engine=engine, raw=True)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def test_apply_axis1(float_frame):
|
| 316 |
+
d = float_frame.index[0]
|
| 317 |
+
result = float_frame.apply(np.mean, axis=1)[d]
|
| 318 |
+
expected = np.mean(float_frame.xs(d))
|
| 319 |
+
assert result == expected
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def test_apply_mixed_dtype_corner():
|
| 323 |
+
df = DataFrame({"A": ["foo"], "B": [1.0]})
|
| 324 |
+
result = df[:0].apply(np.mean, axis=1)
|
| 325 |
+
# the result here is actually kind of ambiguous, should it be a Series
|
| 326 |
+
# or a DataFrame?
|
| 327 |
+
expected = Series(np.nan, index=pd.Index([], dtype="int64"))
|
| 328 |
+
tm.assert_series_equal(result, expected)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def test_apply_mixed_dtype_corner_indexing():
|
| 332 |
+
df = DataFrame({"A": ["foo"], "B": [1.0]})
|
| 333 |
+
result = df.apply(lambda x: x["A"], axis=1)
|
| 334 |
+
expected = Series(["foo"], index=[0])
|
| 335 |
+
tm.assert_series_equal(result, expected)
|
| 336 |
+
|
| 337 |
+
result = df.apply(lambda x: x["B"], axis=1)
|
| 338 |
+
expected = Series([1.0], index=[0])
|
| 339 |
+
tm.assert_series_equal(result, expected)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
|
| 343 |
+
@pytest.mark.parametrize("ax", ["index", "columns"])
|
| 344 |
+
@pytest.mark.parametrize(
|
| 345 |
+
"func", [lambda x: x, lambda x: x.mean()], ids=["identity", "mean"]
|
| 346 |
+
)
|
| 347 |
+
@pytest.mark.parametrize("raw", [True, False])
|
| 348 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
| 349 |
+
def test_apply_empty_infer_type(ax, func, raw, axis, engine, request):
|
| 350 |
+
df = DataFrame(**{ax: ["a", "b", "c"]})
|
| 351 |
+
|
| 352 |
+
with np.errstate(all="ignore"):
|
| 353 |
+
test_res = func(np.array([], dtype="f8"))
|
| 354 |
+
is_reduction = not isinstance(test_res, np.ndarray)
|
| 355 |
+
|
| 356 |
+
result = df.apply(func, axis=axis, engine=engine, raw=raw)
|
| 357 |
+
if is_reduction:
|
| 358 |
+
agg_axis = df._get_agg_axis(axis)
|
| 359 |
+
assert isinstance(result, Series)
|
| 360 |
+
assert result.index is agg_axis
|
| 361 |
+
else:
|
| 362 |
+
assert isinstance(result, DataFrame)
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def test_apply_empty_infer_type_broadcast():
|
| 366 |
+
no_cols = DataFrame(index=["a", "b", "c"])
|
| 367 |
+
result = no_cols.apply(lambda x: x.mean(), result_type="broadcast")
|
| 368 |
+
assert isinstance(result, DataFrame)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def test_apply_with_args_kwds_add_some(float_frame):
|
| 372 |
+
def add_some(x, howmuch=0):
|
| 373 |
+
return x + howmuch
|
| 374 |
+
|
| 375 |
+
result = float_frame.apply(add_some, howmuch=2)
|
| 376 |
+
expected = float_frame.apply(lambda x: x + 2)
|
| 377 |
+
tm.assert_frame_equal(result, expected)
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def test_apply_with_args_kwds_agg_and_add(float_frame):
|
| 381 |
+
def agg_and_add(x, howmuch=0):
|
| 382 |
+
return x.mean() + howmuch
|
| 383 |
+
|
| 384 |
+
result = float_frame.apply(agg_and_add, howmuch=2)
|
| 385 |
+
expected = float_frame.apply(lambda x: x.mean() + 2)
|
| 386 |
+
tm.assert_series_equal(result, expected)
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def test_apply_with_args_kwds_subtract_and_divide(float_frame):
|
| 390 |
+
def subtract_and_divide(x, sub, divide=1):
|
| 391 |
+
return (x - sub) / divide
|
| 392 |
+
|
| 393 |
+
result = float_frame.apply(subtract_and_divide, args=(2,), divide=2)
|
| 394 |
+
expected = float_frame.apply(lambda x: (x - 2.0) / 2.0)
|
| 395 |
+
tm.assert_frame_equal(result, expected)
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def test_apply_yield_list(float_frame):
|
| 399 |
+
result = float_frame.apply(list)
|
| 400 |
+
tm.assert_frame_equal(result, float_frame)
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def test_apply_reduce_Series(float_frame):
|
| 404 |
+
float_frame.iloc[::2, float_frame.columns.get_loc("A")] = np.nan
|
| 405 |
+
expected = float_frame.mean(1)
|
| 406 |
+
result = float_frame.apply(np.mean, axis=1)
|
| 407 |
+
tm.assert_series_equal(result, expected)
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
def test_apply_reduce_to_dict():
    # GH 25196, GH 37544: dict as the applied function reduces per axis.
    data = DataFrame([[1, 2], [3, 4]], columns=["c0", "c1"], index=["i0", "i1"])

    got = data.apply(dict, axis=0)
    tm.assert_series_equal(
        got, Series([{"i0": 1, "i1": 3}, {"i0": 2, "i1": 4}], index=data.columns)
    )

    got = data.apply(dict, axis=1)
    tm.assert_series_equal(
        got, Series([{"c0": 1, "c1": 2}, {"c0": 3, "c1": 4}], index=data.index)
    )
def test_apply_differently_indexed():
    # describe() returns a differently-indexed Series per column/row; apply
    # must align the results into a frame.
    frame = DataFrame(np.random.default_rng(2).standard_normal((20, 10)))

    got = frame.apply(Series.describe, axis=0)
    want = DataFrame(
        {label: col.describe() for label, col in frame.items()}, columns=frame.columns
    )
    tm.assert_frame_equal(got, want)

    got = frame.apply(Series.describe, axis=1)
    want = DataFrame(
        {label: row.describe() for label, row in frame.T.items()}, columns=frame.index
    ).T
    tm.assert_frame_equal(got, want)
def test_apply_bug():
    # GH 6125: axis=1 apply must behave the same regardless of the dtype of
    # the first column (int vs datetime).
    def pick_market(row):
        return row["market"]

    frame = DataFrame(
        [
            [1, "ABC0", 50],
            [1, "YUM0", 20],
            [1, "DEF0", 20],
            [2, "ABC1", 50],
            [2, "YUM1", 20],
            [2, "DEF1", 20],
        ],
        columns=["a", "market", "position"],
    )
    want = frame.apply(pick_market, axis=1)

    frame = DataFrame(
        [
            [datetime(2013, 1, 1), "ABC0", 50],
            [datetime(2013, 1, 2), "YUM0", 20],
            [datetime(2013, 1, 3), "DEF0", 20],
            [datetime(2013, 1, 4), "ABC1", 50],
            [datetime(2013, 1, 5), "YUM1", 20],
            [datetime(2013, 1, 6), "DEF1", 20],
        ],
        columns=["a", "market", "position"],
    )
    got = frame.apply(pick_market, axis=1)
    tm.assert_series_equal(got, want)
def test_apply_convert_objects():
    # An identity row-wise apply over mixed object/float columns must
    # round-trip the frame exactly (no dtype conversion).
    expected = DataFrame(
        {
            "A": "foo foo foo foo bar bar bar bar foo foo foo".split(),
            "B": "one one one two one one one two two two one".split(),
            "C": "dull dull shiny dull dull shiny shiny dull shiny shiny shiny".split(),
            "D": np.random.default_rng(2).standard_normal(11),
            "E": np.random.default_rng(2).standard_normal(11),
            "F": np.random.default_rng(2).standard_normal(11),
        }
    )

    result = expected.apply(lambda row: row, axis=1)
    tm.assert_frame_equal(result, expected)
def test_apply_attach_name(float_frame):
|
| 522 |
+
result = float_frame.apply(lambda x: x.name)
|
| 523 |
+
expected = Series(float_frame.columns, index=float_frame.columns)
|
| 524 |
+
tm.assert_series_equal(result, expected)
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def test_apply_attach_name_axis1(float_frame):
|
| 528 |
+
result = float_frame.apply(lambda x: x.name, axis=1)
|
| 529 |
+
expected = Series(float_frame.index, index=float_frame.index)
|
| 530 |
+
tm.assert_series_equal(result, expected)
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
def test_apply_attach_name_non_reduction(float_frame):
|
| 534 |
+
# non-reductions
|
| 535 |
+
result = float_frame.apply(lambda x: np.repeat(x.name, len(x)))
|
| 536 |
+
expected = DataFrame(
|
| 537 |
+
np.tile(float_frame.columns, (len(float_frame.index), 1)),
|
| 538 |
+
index=float_frame.index,
|
| 539 |
+
columns=float_frame.columns,
|
| 540 |
+
)
|
| 541 |
+
tm.assert_frame_equal(result, expected)
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
def test_apply_attach_name_non_reduction_axis1(float_frame):
|
| 545 |
+
result = float_frame.apply(lambda x: np.repeat(x.name, len(x)), axis=1)
|
| 546 |
+
expected = Series(
|
| 547 |
+
np.repeat(t[0], len(float_frame.columns)) for t in float_frame.itertuples()
|
| 548 |
+
)
|
| 549 |
+
expected.index = float_frame.index
|
| 550 |
+
tm.assert_series_equal(result, expected)
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
def test_apply_multi_index():
    # A MultiIndex on the rows survives a row-wise Series-returning apply.
    idx = MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "d"]])
    frame = DataFrame([[1, 2], [3, 4], [5, 6]], index=idx, columns=["col1", "col2"])
    got = frame.apply(lambda row: Series({"min": min(row), "max": max(row)}), 1)
    want = DataFrame([[1, 2], [3, 4], [5, 6]], index=idx, columns=["min", "max"])
    tm.assert_frame_equal(got, want, check_like=True)
@pytest.mark.parametrize(
    "df, dicts",
    [
        [
            DataFrame([["foo", "bar"], ["spam", "eggs"]]),
            Series([{0: "foo", 1: "spam"}, {0: "bar", 1: "eggs"}]),
        ],
        [DataFrame([[0, 1], [2, 3]]), Series([{0: 0, 1: 2}, {0: 1, 1: 3}])],
    ],
)
def test_apply_dict(df, dicts):
    # GH 8735: to_dict per column reduces (result_type None/"reduce") or
    # expands back to the original frame (result_type="expand").
    to_dict = lambda col: col.to_dict()

    tm.assert_series_equal(df.apply(to_dict, result_type="reduce"), dicts)
    tm.assert_frame_equal(df.apply(to_dict, result_type="expand"), df)
    tm.assert_series_equal(df.apply(to_dict), dicts)
def test_apply_non_numpy_dtype():
    # GH 12244: tz-aware datetimes round-trip through apply and support
    # timedelta arithmetic inside the UDF.
    frame = DataFrame(
        {"dt": date_range("2015-01-01", periods=3, tz="Europe/Brussels")}
    )
    tm.assert_frame_equal(frame.apply(lambda col: col), frame)

    got = frame.apply(lambda col: col + pd.Timedelta("1day"))
    want = DataFrame(
        {"dt": date_range("2015-01-02", periods=3, tz="Europe/Brussels")}
    )
    tm.assert_frame_equal(got, want)
def test_apply_non_numpy_dtype_category():
    # Identity apply preserves categorical dtype.
    frame = DataFrame({"dt": ["a", "b", "c", "a"]}, dtype="category")
    tm.assert_frame_equal(frame.apply(lambda col: col), frame)
def test_apply_dup_names_multi_agg():
    # GH 21063: duplicate column labels survive a list-of-funcs agg.
    frame = DataFrame([[0, 1], [2, 3]], columns=["a", "a"])
    want = DataFrame([[0, 1]], columns=["a", "a"], index=["min"])
    tm.assert_frame_equal(frame.agg(["min"]), want)
@pytest.mark.parametrize("op", ["apply", "agg"])
def test_apply_nested_result_axis_1(op):
    # GH 13820: a UDF returning a list per row yields an object Series of lists.
    def double_triplet(row):
        return [2 * row["A"], 2 * row["C"], 2 * row["B"]]

    frame = DataFrame(np.zeros((4, 4)), columns=list("ABCD"))
    got = getattr(frame, op)(double_triplet, axis=1)
    want = Series([[0.0, 0.0, 0.0] for _ in range(4)])
    tm.assert_series_equal(got, want)
def test_apply_noreduction_tzaware_object():
    # GH 31505: tz-aware values must not be converted by an identity apply.
    want = DataFrame(
        {"foo": [Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]"
    )
    tm.assert_frame_equal(want.apply(lambda col: col), want)
    tm.assert_frame_equal(want.apply(lambda col: col.copy()), want)
def test_apply_function_runs_once():
    # GH 30815: every row is visited exactly once, with no warm-up call.
    frame = DataFrame({"a": [1, 2, 3]})
    seen = []  # row labels the UDF was called with

    def reducer(row):
        seen.append(row.name)

    def identity(row):
        seen.append(row.name)
        return row

    for udf in (reducer, identity):
        del seen[:]
        frame.apply(udf, axis=1)
        assert seen == list(frame.index)
def test_apply_raw_function_runs_once(engine):
|
| 657 |
+
# https://github.com/pandas-dev/pandas/issues/34506
|
| 658 |
+
if engine == "numba":
|
| 659 |
+
pytest.skip("appending to list outside of numba func is not supported")
|
| 660 |
+
|
| 661 |
+
df = DataFrame({"a": [1, 2, 3]})
|
| 662 |
+
values = [] # Save row values function is applied to
|
| 663 |
+
|
| 664 |
+
def reducing_function(row):
|
| 665 |
+
values.extend(row)
|
| 666 |
+
|
| 667 |
+
def non_reducing_function(row):
|
| 668 |
+
values.extend(row)
|
| 669 |
+
return row
|
| 670 |
+
|
| 671 |
+
for func in [reducing_function, non_reducing_function]:
|
| 672 |
+
del values[:]
|
| 673 |
+
|
| 674 |
+
df.apply(func, engine=engine, raw=True, axis=1)
|
| 675 |
+
assert values == list(df.a.to_list())
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
def test_apply_with_byte_string():
    # GH 34529: bytes values survive apply; the result carries object dtype.
    frame = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"])
    want = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"], dtype=object)
    got = frame.apply(lambda col: col.astype("object"))
    tm.assert_frame_equal(got, want)
@pytest.mark.parametrize("val", ["asd", 12, None, np.nan])
def test_apply_category_equalness(val):
    # GH 21239: equality comparisons inside apply on a categorical column.
    raw = ["asd", None, 12, "asd", "cde", np.nan]
    frame = DataFrame({"a": raw}, dtype="category")

    got = frame.a.apply(lambda x: x == val)
    # missing entries stay missing instead of comparing False
    want = Series([np.nan if pd.isnull(x) else x == val for x in raw], name="a")
    tm.assert_series_equal(got, want)
# The user has supplied an opaque UDF that transforms its input,
# so the shape and type of the output must be inferred.
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
def test_infer_row_shape():
    # GH 17437: when the output length differs from the input length
    # (fft keeps 10 rows, rfft shrinks to 6), the shape is inferred.
    frame = DataFrame(np.random.default_rng(2).random((10, 2)))
    assert frame.apply(np.fft.fft, axis=0).shape == (10, 2)
    assert frame.apply(np.fft.rfft, axis=0).shape == (6, 2)
@pytest.mark.parametrize(
    "ops, by_row, expected",
    [
        ({"a": lambda x: x + 1}, "compat", DataFrame({"a": [2, 3]})),
        ({"a": lambda x: x + 1}, False, DataFrame({"a": [2, 3]})),
        ({"a": lambda x: x.sum()}, "compat", Series({"a": 3})),
        ({"a": lambda x: x.sum()}, False, Series({"a": 3})),
        (
            {"a": ["sum", np.sum, lambda x: x.sum()]},
            "compat",
            DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
        ),
        (
            {"a": ["sum", np.sum, lambda x: x.sum()]},
            False,
            DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
        ),
        ({"a": lambda x: 1}, "compat", DataFrame({"a": [1, 1]})),
        ({"a": lambda x: 1}, False, Series({"a": 1})),
    ],
)
def test_dictlike_lambda(ops, by_row, expected):
    # GH53601: dict-of-callables dispatch honours the by_row flag.
    frame = DataFrame({"a": [1, 2]})
    tm.assert_equal(frame.apply(ops, by_row=by_row), expected)
@pytest.mark.parametrize(
    "ops",
    [
        {"a": lambda x: x + 1},
        {"a": lambda x: x.sum()},
        {"a": ["sum", np.sum, lambda x: x.sum()]},
        {"a": lambda x: 1},
    ],
)
def test_dictlike_lambda_raises(ops):
    # GH53601: by_row=True is rejected for dict-like funcs.
    frame = DataFrame({"a": [1, 2]})
    with pytest.raises(ValueError, match="by_row=True not allowed"):
        frame.apply(ops, by_row=True)
def test_with_dictlike_columns():
    # GH 17602: a dict result per row reduces to a Series of dicts.
    frame = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
    got = frame.apply(lambda row: {"s": row["a"] + row["b"]}, axis=1)
    want = Series([{"s": 3} for _ in frame.itertuples()])
    tm.assert_series_equal(got, want)

    # an extra non-numeric column must not change the inference
    frame["tm"] = [
        Timestamp("2017-05-01 00:00:00"),
        Timestamp("2017-05-02 00:00:00"),
    ]
    got = frame.apply(lambda row: {"s": row["a"] + row["b"]}, axis=1)
    tm.assert_series_equal(got, want)

    # compose a series
    got = (frame["a"] + frame["b"]).apply(lambda v: {"s": v})
    tm.assert_series_equal(got, Series([{"s": 3}, {"s": 3}]))
def test_with_dictlike_columns_with_datetime():
    # GH 18775: empty dicts per row with a datetime column present.
    frame = DataFrame()
    frame["author"] = ["X", "Y", "Z"]
    frame["publisher"] = ["BBC", "NBC", "N24"]
    frame["date"] = pd.to_datetime(
        ["17-10-2010 07:15:30", "13-05-2011 08:20:35", "15-01-2013 09:09:09"],
        dayfirst=True,
    )
    got = frame.apply(lambda row: {}, axis=1)
    tm.assert_series_equal(got, Series([{}, {}, {}]))
def test_with_dictlike_columns_with_infer():
    # GH 17602: result_type="expand" turns dict keys into columns.
    frame = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
    want = DataFrame({"s": [3, 3]})
    got = frame.apply(
        lambda row: {"s": row["a"] + row["b"]}, axis=1, result_type="expand"
    )
    tm.assert_frame_equal(got, want)

    # an extra datetime column must not change the expansion
    frame["tm"] = [
        Timestamp("2017-05-01 00:00:00"),
        Timestamp("2017-05-02 00:00:00"),
    ]
    got = frame.apply(
        lambda row: {"s": row["a"] + row["b"]}, axis=1, result_type="expand"
    )
    tm.assert_frame_equal(got, want)
@pytest.mark.parametrize(
    "ops, by_row, expected",
    [
        ([lambda x: x + 1], "compat", DataFrame({("a", "<lambda>"): [2, 3]})),
        ([lambda x: x + 1], False, DataFrame({("a", "<lambda>"): [2, 3]})),
        ([lambda x: x.sum()], "compat", DataFrame({"a": [3]}, index=["<lambda>"])),
        ([lambda x: x.sum()], False, DataFrame({"a": [3]}, index=["<lambda>"])),
        (
            ["sum", np.sum, lambda x: x.sum()],
            "compat",
            DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
        ),
        (
            ["sum", np.sum, lambda x: x.sum()],
            False,
            DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
        ),
        (
            [lambda x: x + 1, lambda x: 3],
            "compat",
            DataFrame([[2, 3], [3, 3]], columns=[["a", "a"], ["<lambda>", "<lambda>"]]),
        ),
        (
            [lambda x: 2, lambda x: 3],
            False,
            DataFrame({"a": [2, 3]}, ["<lambda>", "<lambda>"]),
        ),
    ],
)
def test_listlike_lambda(ops, by_row, expected):
    # GH53601: list-of-callables dispatch honours the by_row flag.
    frame = DataFrame({"a": [1, 2]})
    tm.assert_equal(frame.apply(ops, by_row=by_row), expected)
@pytest.mark.parametrize(
    "ops",
    [
        [lambda x: x + 1],
        [lambda x: x.sum()],
        ["sum", np.sum, lambda x: x.sum()],
        [lambda x: x + 1, lambda x: 3],
    ],
)
def test_listlike_lambda_raises(ops):
    # GH53601: by_row=True is rejected for list-like funcs.
    frame = DataFrame({"a": [1, 2]})
    with pytest.raises(ValueError, match="by_row=True not allowed"):
        frame.apply(ops, by_row=True)
def test_with_listlike_columns():
    # GH 17348: tuple results per row reduce to a Series of tuples,
    # regardless of column dtypes.
    frame = DataFrame(
        {
            "a": Series(np.random.default_rng(2).standard_normal(4)),
            "b": ["a", "list", "of", "words"],
            "ts": date_range("2016-10-01", periods=4, freq="h"),
        }
    )

    for cols in (["a", "b"], ["a", "ts"]):
        got = frame[cols].apply(tuple, axis=1)
        want = Series([t[1:] for t in frame[cols].itertuples()])
        tm.assert_series_equal(got, want)
def test_with_listlike_columns_returning_list():
    # GH 18919: list results stay as-is even with a MultiIndex on the rows.
    frame = DataFrame(
        {"x": Series([["a", "b"], ["q"]]), "y": Series([["z"], ["q", "t"]])}
    )
    frame.index = MultiIndex.from_tuples([("i0", "j0"), ("i1", "j1")])

    got = frame.apply(lambda row: [el for el in row["x"] if el in row["y"]], axis=1)
    tm.assert_series_equal(got, Series([[], ["q"]], index=frame.index))
def test_infer_output_shape_columns():
    # GH 18573: tuple results over mixed-dtype rows reduce to a Series.
    frame = DataFrame(
        {
            "number": [1.0, 2.0],
            "string": ["foo", "bar"],
            "datetime": [
                Timestamp("2017-11-29 03:30:00"),
                Timestamp("2017-11-29 03:45:00"),
            ],
        }
    )
    got = frame.apply(lambda row: (row.number, row.string), axis=1)
    want = Series([(t.number, t.string) for t in frame.itertuples()])
    tm.assert_series_equal(got, want)
def test_infer_output_shape_listlike_columns():
    # GH 16353: list results are kept as object values whether or not their
    # length matches the number of columns.
    frame = DataFrame(
        np.random.default_rng(2).standard_normal((6, 3)), columns=["A", "B", "C"]
    )

    for payload in ([1, 2, 3], [1, 2]):
        got = frame.apply(lambda row: payload, axis=1)
        want = Series([payload for _ in frame.itertuples()])
        tm.assert_series_equal(got, want)
@pytest.mark.parametrize("val", [1, 2])
def test_infer_output_shape_listlike_columns_np_func(val):
    # GH 17970: ndarray results per row stay object values, any length.
    frame = DataFrame({"a": [1, 2, 3]}, index=list("abc"))

    got = frame.apply(lambda row: np.ones(val), axis=1)
    want = Series([np.ones(val) for _ in frame.itertuples()], index=frame.index)
    tm.assert_series_equal(got, want)
def test_infer_output_shape_listlike_columns_with_timestamp():
    # GH 17892: constant tuple results with a Timestamp column present.
    frame = DataFrame(
        {
            "a": [
                Timestamp("2010-02-01"),
                Timestamp("2010-02-04"),
                Timestamp("2010-02-05"),
                Timestamp("2010-02-06"),
            ],
            "b": [9, 5, 4, 3],
            "c": [5, 3, 4, 2],
            "d": [1, 2, 3, 4],
        }
    )

    def constant_pair(row):
        return (1, 2)

    got = frame.apply(constant_pair, axis=1)
    want = Series([(1, 2) for _ in frame.itertuples()])
    tm.assert_series_equal(got, want)
@pytest.mark.parametrize("lst", [[1, 2, 3], [1, 2]])
def test_consistent_coerce_for_shapes(lst):
    # Column names must NOT be propagated just because the output shape
    # happens to match the input shape.
    frame = DataFrame(
        np.random.default_rng(2).standard_normal((4, 3)), columns=["A", "B", "C"]
    )

    got = frame.apply(lambda row: lst, axis=1)
    want = Series([lst for _ in frame.itertuples()])
    tm.assert_series_equal(got, want)
def test_consistent_names(int_frame_const_col):
|
| 973 |
+
# if a Series is returned, we should use the resulting index names
|
| 974 |
+
df = int_frame_const_col
|
| 975 |
+
|
| 976 |
+
result = df.apply(
|
| 977 |
+
lambda x: Series([1, 2, 3], index=["test", "other", "cols"]), axis=1
|
| 978 |
+
)
|
| 979 |
+
expected = int_frame_const_col.rename(
|
| 980 |
+
columns={"A": "test", "B": "other", "C": "cols"}
|
| 981 |
+
)
|
| 982 |
+
tm.assert_frame_equal(result, expected)
|
| 983 |
+
|
| 984 |
+
result = df.apply(lambda x: Series([1, 2], index=["test", "other"]), axis=1)
|
| 985 |
+
expected = expected[["test", "other"]]
|
| 986 |
+
tm.assert_frame_equal(result, expected)
|
| 987 |
+
|
| 988 |
+
|
| 989 |
+
def test_result_type(int_frame_const_col):
|
| 990 |
+
# result_type should be consistent no matter which
|
| 991 |
+
# path we take in the code
|
| 992 |
+
df = int_frame_const_col
|
| 993 |
+
|
| 994 |
+
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand")
|
| 995 |
+
expected = df.copy()
|
| 996 |
+
expected.columns = [0, 1, 2]
|
| 997 |
+
tm.assert_frame_equal(result, expected)
|
| 998 |
+
|
| 999 |
+
|
| 1000 |
+
def test_result_type_shorter_list(int_frame_const_col):
|
| 1001 |
+
# result_type should be consistent no matter which
|
| 1002 |
+
# path we take in the code
|
| 1003 |
+
df = int_frame_const_col
|
| 1004 |
+
result = df.apply(lambda x: [1, 2], axis=1, result_type="expand")
|
| 1005 |
+
expected = df[["A", "B"]].copy()
|
| 1006 |
+
expected.columns = [0, 1]
|
| 1007 |
+
tm.assert_frame_equal(result, expected)
|
| 1008 |
+
|
| 1009 |
+
|
| 1010 |
+
def test_result_type_broadcast(int_frame_const_col, request, engine):
|
| 1011 |
+
# result_type should be consistent no matter which
|
| 1012 |
+
# path we take in the code
|
| 1013 |
+
if engine == "numba":
|
| 1014 |
+
mark = pytest.mark.xfail(reason="numba engine doesn't support list return")
|
| 1015 |
+
request.node.add_marker(mark)
|
| 1016 |
+
df = int_frame_const_col
|
| 1017 |
+
# broadcast result
|
| 1018 |
+
result = df.apply(
|
| 1019 |
+
lambda x: [1, 2, 3], axis=1, result_type="broadcast", engine=engine
|
| 1020 |
+
)
|
| 1021 |
+
expected = df.copy()
|
| 1022 |
+
tm.assert_frame_equal(result, expected)
|
| 1023 |
+
|
| 1024 |
+
|
| 1025 |
+
def test_result_type_broadcast_series_func(int_frame_const_col, engine, request):
|
| 1026 |
+
# result_type should be consistent no matter which
|
| 1027 |
+
# path we take in the code
|
| 1028 |
+
if engine == "numba":
|
| 1029 |
+
mark = pytest.mark.xfail(
|
| 1030 |
+
reason="numba Series constructor only support ndarrays not list data"
|
| 1031 |
+
)
|
| 1032 |
+
request.node.add_marker(mark)
|
| 1033 |
+
df = int_frame_const_col
|
| 1034 |
+
columns = ["other", "col", "names"]
|
| 1035 |
+
result = df.apply(
|
| 1036 |
+
lambda x: Series([1, 2, 3], index=columns),
|
| 1037 |
+
axis=1,
|
| 1038 |
+
result_type="broadcast",
|
| 1039 |
+
engine=engine,
|
| 1040 |
+
)
|
| 1041 |
+
expected = df.copy()
|
| 1042 |
+
tm.assert_frame_equal(result, expected)
|
| 1043 |
+
|
| 1044 |
+
|
| 1045 |
+
def test_result_type_series_result(int_frame_const_col, engine, request):
|
| 1046 |
+
# result_type should be consistent no matter which
|
| 1047 |
+
# path we take in the code
|
| 1048 |
+
if engine == "numba":
|
| 1049 |
+
mark = pytest.mark.xfail(
|
| 1050 |
+
reason="numba Series constructor only support ndarrays not list data"
|
| 1051 |
+
)
|
| 1052 |
+
request.node.add_marker(mark)
|
| 1053 |
+
df = int_frame_const_col
|
| 1054 |
+
# series result
|
| 1055 |
+
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1, engine=engine)
|
| 1056 |
+
expected = df.copy()
|
| 1057 |
+
tm.assert_frame_equal(result, expected)
|
| 1058 |
+
|
| 1059 |
+
|
| 1060 |
+
def test_result_type_series_result_other_index(int_frame_const_col, engine, request):
|
| 1061 |
+
# result_type should be consistent no matter which
|
| 1062 |
+
# path we take in the code
|
| 1063 |
+
|
| 1064 |
+
if engine == "numba":
|
| 1065 |
+
mark = pytest.mark.xfail(
|
| 1066 |
+
reason="no support in numba Series constructor for list of columns"
|
| 1067 |
+
)
|
| 1068 |
+
request.node.add_marker(mark)
|
| 1069 |
+
df = int_frame_const_col
|
| 1070 |
+
# series result with other index
|
| 1071 |
+
columns = ["other", "col", "names"]
|
| 1072 |
+
result = df.apply(lambda x: Series([1, 2, 3], index=columns), axis=1, engine=engine)
|
| 1073 |
+
expected = df.copy()
|
| 1074 |
+
expected.columns = columns
|
| 1075 |
+
tm.assert_frame_equal(result, expected)
|
| 1076 |
+
|
| 1077 |
+
|
| 1078 |
+
@pytest.mark.parametrize(
    "box",
    [lambda x: list(x), lambda x: tuple(x), lambda x: np.array(x, dtype="int64")],
    ids=["list", "tuple", "array"],
)
def test_consistency_for_boxed(box, int_frame_const_col):
    # The container type of the returned pair must not change the output shape.
    frame = int_frame_const_col

    got = frame.apply(lambda row: box([1, 2]), axis=1)
    want = Series([box([1, 2]) for _ in frame.itertuples()])
    tm.assert_series_equal(got, want)

    got = frame.apply(lambda row: box([1, 2]), axis=1, result_type="expand")
    want = int_frame_const_col[["A", "B"]].rename(columns={"A": 0, "B": 1})
    tm.assert_frame_equal(got, want)
def test_agg_transform(axis, float_frame):
    # apply with a ufunc / list of ufuncs behaves like a transform; list-like
    # inputs add an extra MultiIndex level on the non-applied axis.
    other_axis = 1 if axis in {0, "index"} else 0

    with np.errstate(all="ignore"):
        f_abs = np.abs(float_frame)
        f_sqrt = np.sqrt(float_frame)

    # single ufunc
    tm.assert_frame_equal(float_frame.apply(np.sqrt, axis=axis), f_sqrt.copy())

    # one-element list: result wrapped in an extra index level
    got = float_frame.apply([np.sqrt], axis=axis)
    want = f_sqrt.copy()
    if axis in {0, "index"}:
        want.columns = MultiIndex.from_product([float_frame.columns, ["sqrt"]])
    else:
        want.index = MultiIndex.from_product([float_frame.index, ["sqrt"]])
    tm.assert_frame_equal(got, want)

    # several functions: applied per series, then concatenated in order
    got = float_frame.apply([np.abs, np.sqrt], axis=axis)
    want = zip_frames([f_abs, f_sqrt], axis=other_axis)
    if axis in {0, "index"}:
        want.columns = MultiIndex.from_product(
            [float_frame.columns, ["absolute", "sqrt"]]
        )
    else:
        want.index = MultiIndex.from_product(
            [float_frame.index, ["absolute", "sqrt"]]
        )
    tm.assert_frame_equal(got, want)
def test_demo():
    # demonstration: list-of-funcs agg stacks the reductions as rows
    frame = DataFrame({"A": range(5), "B": 5})

    got = frame.agg(["min", "max"])
    want = DataFrame(
        {"A": [0, 4], "B": [5, 5]}, columns=["A", "B"], index=["min", "max"]
    )
    tm.assert_frame_equal(got, want)
def test_demo_dict_agg():
    # demonstration: dict agg produces NaN where a func was not requested
    frame = DataFrame({"A": range(5), "B": 5})
    got = frame.agg({"A": ["min", "max"], "B": ["sum", "max"]})
    want = DataFrame(
        {"A": [4.0, 0.0, np.nan], "B": [5.0, np.nan, 25.0]},
        columns=["A", "B"],
        index=["max", "min", "sum"],
    )
    tm.assert_frame_equal(got.reindex_like(want), want)
def test_agg_with_name_as_column_name():
    # GH 36212: a column literally named "name" must not leak into the
    # result's name when aggregating the frame.
    frame = DataFrame({"name": ["foo", "bar"]})

    got = frame.agg({"name": "count"})
    tm.assert_series_equal(got, Series({"name": 2}))

    # aggregating the Series itself keeps its own name
    got = frame["name"].agg({"name": "count"})
    tm.assert_series_equal(got, Series({"name": 2}, name="name"))
def test_agg_multiple_mixed():
    # GH 20909: mixed-dtype frame with multiple reducers.
    mixed = DataFrame(
        {"A": [1, 2, 3], "B": [1.0, 2.0, 3.0], "C": ["foo", "bar", "baz"]}
    )
    want = DataFrame(
        {"A": [1, 6], "B": [1.0, 6.0], "C": ["bar", "foobarbaz"]},
        index=["min", "sum"],
    )
    # sorted index
    tm.assert_frame_equal(mixed.agg(["min", "sum"]), want)

    got = mixed[["C", "B", "A"]].agg(["sum", "min"])
    # GH 40420: the result index follows the order the functions were given in
    want = want[["C", "B", "A"]].reindex(["sum", "min"])
    tm.assert_frame_equal(got, want)
def test_agg_multiple_mixed_raises():
|
| 1201 |
+
# GH 20909
|
| 1202 |
+
mdf = DataFrame(
|
| 1203 |
+
{
|
| 1204 |
+
"A": [1, 2, 3],
|
| 1205 |
+
"B": [1.0, 2.0, 3.0],
|
| 1206 |
+
"C": ["foo", "bar", "baz"],
|
| 1207 |
+
"D": date_range("20130101", periods=3),
|
| 1208 |
+
}
|
| 1209 |
+
)
|
| 1210 |
+
|
| 1211 |
+
# sorted index
|
| 1212 |
+
msg = "does not support reduction"
|
| 1213 |
+
with pytest.raises(TypeError, match=msg):
|
| 1214 |
+
mdf.agg(["min", "sum"])
|
| 1215 |
+
|
| 1216 |
+
with pytest.raises(TypeError, match=msg):
|
| 1217 |
+
mdf[["D", "C", "B", "A"]].agg(["sum", "min"])
|
| 1218 |
+
|
| 1219 |
+
|
| 1220 |
+
def test_agg_reduce(axis, float_frame):
|
| 1221 |
+
other_axis = 1 if axis in {0, "index"} else 0
|
| 1222 |
+
name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values()
|
| 1223 |
+
|
| 1224 |
+
# all reducers
|
| 1225 |
+
expected = pd.concat(
|
| 1226 |
+
[
|
| 1227 |
+
float_frame.mean(axis=axis),
|
| 1228 |
+
float_frame.max(axis=axis),
|
| 1229 |
+
float_frame.sum(axis=axis),
|
| 1230 |
+
],
|
| 1231 |
+
axis=1,
|
| 1232 |
+
)
|
| 1233 |
+
expected.columns = ["mean", "max", "sum"]
|
| 1234 |
+
expected = expected.T if axis in {0, "index"} else expected
|
| 1235 |
+
|
| 1236 |
+
result = float_frame.agg(["mean", "max", "sum"], axis=axis)
|
| 1237 |
+
tm.assert_frame_equal(result, expected)
|
| 1238 |
+
|
| 1239 |
+
# dict input with scalars
|
| 1240 |
+
func = {name1: "mean", name2: "sum"}
|
| 1241 |
+
result = float_frame.agg(func, axis=axis)
|
| 1242 |
+
expected = Series(
|
| 1243 |
+
[
|
| 1244 |
+
float_frame.loc(other_axis)[name1].mean(),
|
| 1245 |
+
float_frame.loc(other_axis)[name2].sum(),
|
| 1246 |
+
],
|
| 1247 |
+
index=[name1, name2],
|
| 1248 |
+
)
|
| 1249 |
+
tm.assert_series_equal(result, expected)
|
| 1250 |
+
|
| 1251 |
+
# dict input with lists
|
| 1252 |
+
func = {name1: ["mean"], name2: ["sum"]}
|
| 1253 |
+
result = float_frame.agg(func, axis=axis)
|
| 1254 |
+
expected = DataFrame(
|
| 1255 |
+
{
|
| 1256 |
+
name1: Series([float_frame.loc(other_axis)[name1].mean()], index=["mean"]),
|
| 1257 |
+
name2: Series([float_frame.loc(other_axis)[name2].sum()], index=["sum"]),
|
| 1258 |
+
}
|
| 1259 |
+
)
|
| 1260 |
+
expected = expected.T if axis in {1, "columns"} else expected
|
| 1261 |
+
tm.assert_frame_equal(result, expected)
|
| 1262 |
+
|
| 1263 |
+
# dict input with lists with multiple
|
| 1264 |
+
func = {name1: ["mean", "sum"], name2: ["sum", "max"]}
|
| 1265 |
+
result = float_frame.agg(func, axis=axis)
|
| 1266 |
+
expected = pd.concat(
|
| 1267 |
+
{
|
| 1268 |
+
name1: Series(
|
| 1269 |
+
[
|
| 1270 |
+
float_frame.loc(other_axis)[name1].mean(),
|
| 1271 |
+
float_frame.loc(other_axis)[name1].sum(),
|
| 1272 |
+
],
|
| 1273 |
+
index=["mean", "sum"],
|
| 1274 |
+
),
|
| 1275 |
+
name2: Series(
|
| 1276 |
+
[
|
| 1277 |
+
float_frame.loc(other_axis)[name2].sum(),
|
| 1278 |
+
float_frame.loc(other_axis)[name2].max(),
|
| 1279 |
+
],
|
| 1280 |
+
index=["sum", "max"],
|
| 1281 |
+
),
|
| 1282 |
+
},
|
| 1283 |
+
axis=1,
|
| 1284 |
+
)
|
| 1285 |
+
expected = expected.T if axis in {1, "columns"} else expected
|
| 1286 |
+
tm.assert_frame_equal(result, expected)
|
| 1287 |
+
|
| 1288 |
+
|
| 1289 |
+
def test_nuiscance_columns():
|
| 1290 |
+
# GH 15015
|
| 1291 |
+
df = DataFrame(
|
| 1292 |
+
{
|
| 1293 |
+
"A": [1, 2, 3],
|
| 1294 |
+
"B": [1.0, 2.0, 3.0],
|
| 1295 |
+
"C": ["foo", "bar", "baz"],
|
| 1296 |
+
"D": date_range("20130101", periods=3),
|
| 1297 |
+
}
|
| 1298 |
+
)
|
| 1299 |
+
|
| 1300 |
+
result = df.agg("min")
|
| 1301 |
+
expected = Series([1, 1.0, "bar", Timestamp("20130101")], index=df.columns)
|
| 1302 |
+
tm.assert_series_equal(result, expected)
|
| 1303 |
+
|
| 1304 |
+
result = df.agg(["min"])
|
| 1305 |
+
expected = DataFrame(
|
| 1306 |
+
[[1, 1.0, "bar", Timestamp("20130101").as_unit("ns")]],
|
| 1307 |
+
index=["min"],
|
| 1308 |
+
columns=df.columns,
|
| 1309 |
+
)
|
| 1310 |
+
tm.assert_frame_equal(result, expected)
|
| 1311 |
+
|
| 1312 |
+
msg = "does not support reduction"
|
| 1313 |
+
with pytest.raises(TypeError, match=msg):
|
| 1314 |
+
df.agg("sum")
|
| 1315 |
+
|
| 1316 |
+
result = df[["A", "B", "C"]].agg("sum")
|
| 1317 |
+
expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"])
|
| 1318 |
+
tm.assert_series_equal(result, expected)
|
| 1319 |
+
|
| 1320 |
+
msg = "does not support reduction"
|
| 1321 |
+
with pytest.raises(TypeError, match=msg):
|
| 1322 |
+
df.agg(["sum"])
|
| 1323 |
+
|
| 1324 |
+
|
| 1325 |
+
@pytest.mark.parametrize("how", ["agg", "apply"])
|
| 1326 |
+
def test_non_callable_aggregates(how):
|
| 1327 |
+
# GH 16405
|
| 1328 |
+
# 'size' is a property of frame/series
|
| 1329 |
+
# validate that this is working
|
| 1330 |
+
# GH 39116 - expand to apply
|
| 1331 |
+
df = DataFrame(
|
| 1332 |
+
{"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
|
| 1333 |
+
)
|
| 1334 |
+
|
| 1335 |
+
# Function aggregate
|
| 1336 |
+
result = getattr(df, how)({"A": "count"})
|
| 1337 |
+
expected = Series({"A": 2})
|
| 1338 |
+
|
| 1339 |
+
tm.assert_series_equal(result, expected)
|
| 1340 |
+
|
| 1341 |
+
# Non-function aggregate
|
| 1342 |
+
result = getattr(df, how)({"A": "size"})
|
| 1343 |
+
expected = Series({"A": 3})
|
| 1344 |
+
|
| 1345 |
+
tm.assert_series_equal(result, expected)
|
| 1346 |
+
|
| 1347 |
+
# Mix function and non-function aggs
|
| 1348 |
+
result1 = getattr(df, how)(["count", "size"])
|
| 1349 |
+
result2 = getattr(df, how)(
|
| 1350 |
+
{"A": ["count", "size"], "B": ["count", "size"], "C": ["count", "size"]}
|
| 1351 |
+
)
|
| 1352 |
+
expected = DataFrame(
|
| 1353 |
+
{
|
| 1354 |
+
"A": {"count": 2, "size": 3},
|
| 1355 |
+
"B": {"count": 2, "size": 3},
|
| 1356 |
+
"C": {"count": 2, "size": 3},
|
| 1357 |
+
}
|
| 1358 |
+
)
|
| 1359 |
+
|
| 1360 |
+
tm.assert_frame_equal(result1, result2, check_like=True)
|
| 1361 |
+
tm.assert_frame_equal(result2, expected, check_like=True)
|
| 1362 |
+
|
| 1363 |
+
# Just functional string arg is same as calling df.arg()
|
| 1364 |
+
result = getattr(df, how)("count")
|
| 1365 |
+
expected = df.count()
|
| 1366 |
+
|
| 1367 |
+
tm.assert_series_equal(result, expected)
|
| 1368 |
+
|
| 1369 |
+
|
| 1370 |
+
@pytest.mark.parametrize("how", ["agg", "apply"])
|
| 1371 |
+
def test_size_as_str(how, axis):
|
| 1372 |
+
# GH 39934
|
| 1373 |
+
df = DataFrame(
|
| 1374 |
+
{"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
|
| 1375 |
+
)
|
| 1376 |
+
# Just a string attribute arg same as calling df.arg
|
| 1377 |
+
# on the columns
|
| 1378 |
+
result = getattr(df, how)("size", axis=axis)
|
| 1379 |
+
if axis in (0, "index"):
|
| 1380 |
+
expected = Series(df.shape[0], index=df.columns)
|
| 1381 |
+
else:
|
| 1382 |
+
expected = Series(df.shape[1], index=df.index)
|
| 1383 |
+
tm.assert_series_equal(result, expected)
|
| 1384 |
+
|
| 1385 |
+
|
| 1386 |
+
def test_agg_listlike_result():
|
| 1387 |
+
# GH-29587 user defined function returning list-likes
|
| 1388 |
+
df = DataFrame({"A": [2, 2, 3], "B": [1.5, np.nan, 1.5], "C": ["foo", None, "bar"]})
|
| 1389 |
+
|
| 1390 |
+
def func(group_col):
|
| 1391 |
+
return list(group_col.dropna().unique())
|
| 1392 |
+
|
| 1393 |
+
result = df.agg(func)
|
| 1394 |
+
expected = Series([[2, 3], [1.5], ["foo", "bar"]], index=["A", "B", "C"])
|
| 1395 |
+
tm.assert_series_equal(result, expected)
|
| 1396 |
+
|
| 1397 |
+
result = df.agg([func])
|
| 1398 |
+
expected = expected.to_frame("func").T
|
| 1399 |
+
tm.assert_frame_equal(result, expected)
|
| 1400 |
+
|
| 1401 |
+
|
| 1402 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
| 1403 |
+
@pytest.mark.parametrize(
|
| 1404 |
+
"args, kwargs",
|
| 1405 |
+
[
|
| 1406 |
+
((1, 2, 3), {}),
|
| 1407 |
+
((8, 7, 15), {}),
|
| 1408 |
+
((1, 2), {}),
|
| 1409 |
+
((1,), {"b": 2}),
|
| 1410 |
+
((), {"a": 1, "b": 2}),
|
| 1411 |
+
((), {"a": 2, "b": 1}),
|
| 1412 |
+
((), {"a": 1, "b": 2, "c": 3}),
|
| 1413 |
+
],
|
| 1414 |
+
)
|
| 1415 |
+
def test_agg_args_kwargs(axis, args, kwargs):
|
| 1416 |
+
def f(x, a, b, c=3):
|
| 1417 |
+
return x.sum() + (a + b) / c
|
| 1418 |
+
|
| 1419 |
+
df = DataFrame([[1, 2], [3, 4]])
|
| 1420 |
+
|
| 1421 |
+
if axis == 0:
|
| 1422 |
+
expected = Series([5.0, 7.0])
|
| 1423 |
+
else:
|
| 1424 |
+
expected = Series([4.0, 8.0])
|
| 1425 |
+
|
| 1426 |
+
result = df.agg(f, axis, *args, **kwargs)
|
| 1427 |
+
|
| 1428 |
+
tm.assert_series_equal(result, expected)
|
| 1429 |
+
|
| 1430 |
+
|
| 1431 |
+
@pytest.mark.parametrize("num_cols", [2, 3, 5])
|
| 1432 |
+
def test_frequency_is_original(num_cols, engine, request):
|
| 1433 |
+
# GH 22150
|
| 1434 |
+
if engine == "numba":
|
| 1435 |
+
mark = pytest.mark.xfail(reason="numba engine only supports numeric indices")
|
| 1436 |
+
request.node.add_marker(mark)
|
| 1437 |
+
index = pd.DatetimeIndex(["1950-06-30", "1952-10-24", "1953-05-29"])
|
| 1438 |
+
original = index.copy()
|
| 1439 |
+
df = DataFrame(1, index=index, columns=range(num_cols))
|
| 1440 |
+
df.apply(lambda x: x, engine=engine)
|
| 1441 |
+
assert index.freq == original.freq
|
| 1442 |
+
|
| 1443 |
+
|
| 1444 |
+
def test_apply_datetime_tz_issue(engine, request):
|
| 1445 |
+
# GH 29052
|
| 1446 |
+
|
| 1447 |
+
if engine == "numba":
|
| 1448 |
+
mark = pytest.mark.xfail(
|
| 1449 |
+
reason="numba engine doesn't support non-numeric indexes"
|
| 1450 |
+
)
|
| 1451 |
+
request.node.add_marker(mark)
|
| 1452 |
+
|
| 1453 |
+
timestamps = [
|
| 1454 |
+
Timestamp("2019-03-15 12:34:31.909000+0000", tz="UTC"),
|
| 1455 |
+
Timestamp("2019-03-15 12:34:34.359000+0000", tz="UTC"),
|
| 1456 |
+
Timestamp("2019-03-15 12:34:34.660000+0000", tz="UTC"),
|
| 1457 |
+
]
|
| 1458 |
+
df = DataFrame(data=[0, 1, 2], index=timestamps)
|
| 1459 |
+
result = df.apply(lambda x: x.name, axis=1, engine=engine)
|
| 1460 |
+
expected = Series(index=timestamps, data=timestamps)
|
| 1461 |
+
|
| 1462 |
+
tm.assert_series_equal(result, expected)
|
| 1463 |
+
|
| 1464 |
+
|
| 1465 |
+
@pytest.mark.parametrize("df", [DataFrame({"A": ["a", None], "B": ["c", "d"]})])
|
| 1466 |
+
@pytest.mark.parametrize("method", ["min", "max", "sum"])
|
| 1467 |
+
def test_mixed_column_raises(df, method, using_infer_string):
|
| 1468 |
+
# GH 16832
|
| 1469 |
+
if method == "sum":
|
| 1470 |
+
msg = r'can only concatenate str \(not "int"\) to str|does not support'
|
| 1471 |
+
else:
|
| 1472 |
+
msg = "not supported between instances of 'str' and 'float'"
|
| 1473 |
+
if not using_infer_string:
|
| 1474 |
+
with pytest.raises(TypeError, match=msg):
|
| 1475 |
+
getattr(df, method)()
|
| 1476 |
+
else:
|
| 1477 |
+
getattr(df, method)()
|
| 1478 |
+
|
| 1479 |
+
|
| 1480 |
+
@pytest.mark.parametrize("col", [1, 1.0, True, "a", np.nan])
|
| 1481 |
+
def test_apply_dtype(col):
|
| 1482 |
+
# GH 31466
|
| 1483 |
+
df = DataFrame([[1.0, col]], columns=["a", "b"])
|
| 1484 |
+
result = df.apply(lambda x: x.dtype)
|
| 1485 |
+
expected = df.dtypes
|
| 1486 |
+
|
| 1487 |
+
tm.assert_series_equal(result, expected)
|
| 1488 |
+
|
| 1489 |
+
|
| 1490 |
+
def test_apply_mutating(using_array_manager, using_copy_on_write, warn_copy_on_write):
|
| 1491 |
+
# GH#35462 case where applied func pins a new BlockManager to a row
|
| 1492 |
+
df = DataFrame({"a": range(100), "b": range(100, 200)})
|
| 1493 |
+
df_orig = df.copy()
|
| 1494 |
+
|
| 1495 |
+
def func(row):
|
| 1496 |
+
mgr = row._mgr
|
| 1497 |
+
row.loc["a"] += 1
|
| 1498 |
+
assert row._mgr is not mgr
|
| 1499 |
+
return row
|
| 1500 |
+
|
| 1501 |
+
expected = df.copy()
|
| 1502 |
+
expected["a"] += 1
|
| 1503 |
+
|
| 1504 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
| 1505 |
+
result = df.apply(func, axis=1)
|
| 1506 |
+
|
| 1507 |
+
tm.assert_frame_equal(result, expected)
|
| 1508 |
+
if using_copy_on_write or using_array_manager:
|
| 1509 |
+
# INFO(CoW) With copy on write, mutating a viewing row doesn't mutate the parent
|
| 1510 |
+
# INFO(ArrayManager) With BlockManager, the row is a view and mutated in place,
|
| 1511 |
+
# with ArrayManager the row is not a view, and thus not mutated in place
|
| 1512 |
+
tm.assert_frame_equal(df, df_orig)
|
| 1513 |
+
else:
|
| 1514 |
+
tm.assert_frame_equal(df, result)
|
| 1515 |
+
|
| 1516 |
+
|
| 1517 |
+
def test_apply_empty_list_reduce():
|
| 1518 |
+
# GH#35683 get columns correct
|
| 1519 |
+
df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"])
|
| 1520 |
+
|
| 1521 |
+
result = df.apply(lambda x: [], result_type="reduce")
|
| 1522 |
+
expected = Series({"a": [], "b": []}, dtype=object)
|
| 1523 |
+
tm.assert_series_equal(result, expected)
|
| 1524 |
+
|
| 1525 |
+
|
| 1526 |
+
def test_apply_no_suffix_index(engine, request):
|
| 1527 |
+
# GH36189
|
| 1528 |
+
if engine == "numba":
|
| 1529 |
+
mark = pytest.mark.xfail(
|
| 1530 |
+
reason="numba engine doesn't support list-likes/dict-like callables"
|
| 1531 |
+
)
|
| 1532 |
+
request.node.add_marker(mark)
|
| 1533 |
+
pdf = DataFrame([[4, 9]] * 3, columns=["A", "B"])
|
| 1534 |
+
result = pdf.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], engine=engine)
|
| 1535 |
+
expected = DataFrame(
|
| 1536 |
+
{"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"]
|
| 1537 |
+
)
|
| 1538 |
+
|
| 1539 |
+
tm.assert_frame_equal(result, expected)
|
| 1540 |
+
|
| 1541 |
+
|
| 1542 |
+
def test_apply_raw_returns_string(engine):
|
| 1543 |
+
# https://github.com/pandas-dev/pandas/issues/35940
|
| 1544 |
+
if engine == "numba":
|
| 1545 |
+
pytest.skip("No object dtype support in numba")
|
| 1546 |
+
df = DataFrame({"A": ["aa", "bbb"]})
|
| 1547 |
+
result = df.apply(lambda x: x[0], engine=engine, axis=1, raw=True)
|
| 1548 |
+
expected = Series(["aa", "bbb"])
|
| 1549 |
+
tm.assert_series_equal(result, expected)
|
| 1550 |
+
|
| 1551 |
+
|
| 1552 |
+
def test_aggregation_func_column_order():
|
| 1553 |
+
# GH40420: the result of .agg should have an index that is sorted
|
| 1554 |
+
# according to the arguments provided to agg.
|
| 1555 |
+
df = DataFrame(
|
| 1556 |
+
[
|
| 1557 |
+
(1, 0, 0),
|
| 1558 |
+
(2, 0, 0),
|
| 1559 |
+
(3, 0, 0),
|
| 1560 |
+
(4, 5, 4),
|
| 1561 |
+
(5, 6, 6),
|
| 1562 |
+
(6, 7, 7),
|
| 1563 |
+
],
|
| 1564 |
+
columns=("att1", "att2", "att3"),
|
| 1565 |
+
)
|
| 1566 |
+
|
| 1567 |
+
def sum_div2(s):
|
| 1568 |
+
return s.sum() / 2
|
| 1569 |
+
|
| 1570 |
+
aggs = ["sum", sum_div2, "count", "min"]
|
| 1571 |
+
result = df.agg(aggs)
|
| 1572 |
+
expected = DataFrame(
|
| 1573 |
+
{
|
| 1574 |
+
"att1": [21.0, 10.5, 6.0, 1.0],
|
| 1575 |
+
"att2": [18.0, 9.0, 6.0, 0.0],
|
| 1576 |
+
"att3": [17.0, 8.5, 6.0, 0.0],
|
| 1577 |
+
},
|
| 1578 |
+
index=["sum", "sum_div2", "count", "min"],
|
| 1579 |
+
)
|
| 1580 |
+
tm.assert_frame_equal(result, expected)
|
| 1581 |
+
|
| 1582 |
+
|
| 1583 |
+
def test_apply_getitem_axis_1(engine, request):
|
| 1584 |
+
# GH 13427
|
| 1585 |
+
if engine == "numba":
|
| 1586 |
+
mark = pytest.mark.xfail(
|
| 1587 |
+
reason="numba engine not supporting duplicate index values"
|
| 1588 |
+
)
|
| 1589 |
+
request.node.add_marker(mark)
|
| 1590 |
+
df = DataFrame({"a": [0, 1, 2], "b": [1, 2, 3]})
|
| 1591 |
+
result = df[["a", "a"]].apply(
|
| 1592 |
+
lambda x: x.iloc[0] + x.iloc[1], axis=1, engine=engine
|
| 1593 |
+
)
|
| 1594 |
+
expected = Series([0, 2, 4])
|
| 1595 |
+
tm.assert_series_equal(result, expected)
|
| 1596 |
+
|
| 1597 |
+
|
| 1598 |
+
def test_nuisance_depr_passes_through_warnings():
|
| 1599 |
+
# GH 43740
|
| 1600 |
+
# DataFrame.agg with list-likes may emit warnings for both individual
|
| 1601 |
+
# args and for entire columns, but we only want to emit once. We
|
| 1602 |
+
# catch and suppress the warnings for individual args, but need to make
|
| 1603 |
+
# sure if some other warnings were raised, they get passed through to
|
| 1604 |
+
# the user.
|
| 1605 |
+
|
| 1606 |
+
def expected_warning(x):
|
| 1607 |
+
warnings.warn("Hello, World!")
|
| 1608 |
+
return x.sum()
|
| 1609 |
+
|
| 1610 |
+
df = DataFrame({"a": [1, 2, 3]})
|
| 1611 |
+
with tm.assert_produces_warning(UserWarning, match="Hello, World!"):
|
| 1612 |
+
df.agg([expected_warning])
|
| 1613 |
+
|
| 1614 |
+
|
| 1615 |
+
def test_apply_type():
|
| 1616 |
+
# GH 46719
|
| 1617 |
+
df = DataFrame(
|
| 1618 |
+
{"col1": [3, "string", float], "col2": [0.25, datetime(2020, 1, 1), np.nan]},
|
| 1619 |
+
index=["a", "b", "c"],
|
| 1620 |
+
)
|
| 1621 |
+
|
| 1622 |
+
# axis=0
|
| 1623 |
+
result = df.apply(type, axis=0)
|
| 1624 |
+
expected = Series({"col1": Series, "col2": Series})
|
| 1625 |
+
tm.assert_series_equal(result, expected)
|
| 1626 |
+
|
| 1627 |
+
# axis=1
|
| 1628 |
+
result = df.apply(type, axis=1)
|
| 1629 |
+
expected = Series({"a": Series, "b": Series, "c": Series})
|
| 1630 |
+
tm.assert_series_equal(result, expected)
|
| 1631 |
+
|
| 1632 |
+
|
| 1633 |
+
def test_apply_on_empty_dataframe(engine):
|
| 1634 |
+
# GH 39111
|
| 1635 |
+
df = DataFrame({"a": [1, 2], "b": [3, 0]})
|
| 1636 |
+
result = df.head(0).apply(lambda x: max(x["a"], x["b"]), axis=1, engine=engine)
|
| 1637 |
+
expected = Series([], dtype=np.float64)
|
| 1638 |
+
tm.assert_series_equal(result, expected)
|
| 1639 |
+
|
| 1640 |
+
|
| 1641 |
+
def test_apply_return_list():
|
| 1642 |
+
df = DataFrame({"a": [1, 2], "b": [2, 3]})
|
| 1643 |
+
result = df.apply(lambda x: [x.values])
|
| 1644 |
+
expected = DataFrame({"a": [[1, 2]], "b": [[2, 3]]})
|
| 1645 |
+
tm.assert_frame_equal(result, expected)
|
| 1646 |
+
|
| 1647 |
+
|
| 1648 |
+
@pytest.mark.parametrize(
|
| 1649 |
+
"test, constant",
|
| 1650 |
+
[
|
| 1651 |
+
({"a": [1, 2, 3], "b": [1, 1, 1]}, {"a": [1, 2, 3], "b": [1]}),
|
| 1652 |
+
({"a": [2, 2, 2], "b": [1, 1, 1]}, {"a": [2], "b": [1]}),
|
| 1653 |
+
],
|
| 1654 |
+
)
|
| 1655 |
+
def test_unique_agg_type_is_series(test, constant):
|
| 1656 |
+
# GH#22558
|
| 1657 |
+
df1 = DataFrame(test)
|
| 1658 |
+
expected = Series(data=constant, index=["a", "b"], dtype="object")
|
| 1659 |
+
aggregation = {"a": "unique", "b": "unique"}
|
| 1660 |
+
|
| 1661 |
+
result = df1.agg(aggregation)
|
| 1662 |
+
|
| 1663 |
+
tm.assert_series_equal(result, expected)
|
| 1664 |
+
|
| 1665 |
+
|
| 1666 |
+
def test_any_apply_keyword_non_zero_axis_regression():
|
| 1667 |
+
# https://github.com/pandas-dev/pandas/issues/48656
|
| 1668 |
+
df = DataFrame({"A": [1, 2, 0], "B": [0, 2, 0], "C": [0, 0, 0]})
|
| 1669 |
+
expected = Series([True, True, False])
|
| 1670 |
+
tm.assert_series_equal(df.any(axis=1), expected)
|
| 1671 |
+
|
| 1672 |
+
result = df.apply("any", axis=1)
|
| 1673 |
+
tm.assert_series_equal(result, expected)
|
| 1674 |
+
|
| 1675 |
+
result = df.apply("any", 1)
|
| 1676 |
+
tm.assert_series_equal(result, expected)
|
| 1677 |
+
|
| 1678 |
+
|
| 1679 |
+
def test_agg_mapping_func_deprecated():
|
| 1680 |
+
# GH 53325
|
| 1681 |
+
df = DataFrame({"x": [1, 2, 3]})
|
| 1682 |
+
|
| 1683 |
+
def foo1(x, a=1, c=0):
|
| 1684 |
+
return x + a + c
|
| 1685 |
+
|
| 1686 |
+
def foo2(x, b=2, c=0):
|
| 1687 |
+
return x + b + c
|
| 1688 |
+
|
| 1689 |
+
# single func already takes the vectorized path
|
| 1690 |
+
result = df.agg(foo1, 0, 3, c=4)
|
| 1691 |
+
expected = df + 7
|
| 1692 |
+
tm.assert_frame_equal(result, expected)
|
| 1693 |
+
|
| 1694 |
+
msg = "using .+ in Series.agg cannot aggregate and"
|
| 1695 |
+
|
| 1696 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 1697 |
+
result = df.agg([foo1, foo2], 0, 3, c=4)
|
| 1698 |
+
expected = DataFrame(
|
| 1699 |
+
[[8, 8], [9, 9], [10, 10]], columns=[["x", "x"], ["foo1", "foo2"]]
|
| 1700 |
+
)
|
| 1701 |
+
tm.assert_frame_equal(result, expected)
|
| 1702 |
+
|
| 1703 |
+
# TODO: the result below is wrong, should be fixed (GH53325)
|
| 1704 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 1705 |
+
result = df.agg({"x": foo1}, 0, 3, c=4)
|
| 1706 |
+
expected = DataFrame([2, 3, 4], columns=["x"])
|
| 1707 |
+
tm.assert_frame_equal(result, expected)
|
| 1708 |
+
|
| 1709 |
+
|
| 1710 |
+
def test_agg_std():
|
| 1711 |
+
df = DataFrame(np.arange(6).reshape(3, 2), columns=["A", "B"])
|
| 1712 |
+
|
| 1713 |
+
with tm.assert_produces_warning(FutureWarning, match="using DataFrame.std"):
|
| 1714 |
+
result = df.agg(np.std)
|
| 1715 |
+
expected = Series({"A": 2.0, "B": 2.0}, dtype=float)
|
| 1716 |
+
tm.assert_series_equal(result, expected)
|
| 1717 |
+
|
| 1718 |
+
with tm.assert_produces_warning(FutureWarning, match="using Series.std"):
|
| 1719 |
+
result = df.agg([np.std])
|
| 1720 |
+
expected = DataFrame({"A": 2.0, "B": 2.0}, index=["std"])
|
| 1721 |
+
tm.assert_frame_equal(result, expected)
|
| 1722 |
+
|
| 1723 |
+
|
| 1724 |
+
def test_agg_dist_like_and_nonunique_columns():
|
| 1725 |
+
# GH#51099
|
| 1726 |
+
df = DataFrame(
|
| 1727 |
+
{"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
|
| 1728 |
+
)
|
| 1729 |
+
df.columns = ["A", "A", "C"]
|
| 1730 |
+
|
| 1731 |
+
result = df.agg({"A": "count"})
|
| 1732 |
+
expected = df["A"].count()
|
| 1733 |
+
tm.assert_series_equal(result, expected)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas.compat.numpy import np_version_gte1p25
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import pandas._testing as tm
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def test_agg_relabel():
|
| 11 |
+
# GH 26513
|
| 12 |
+
df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
|
| 13 |
+
|
| 14 |
+
# simplest case with one column, one func
|
| 15 |
+
result = df.agg(foo=("B", "sum"))
|
| 16 |
+
expected = pd.DataFrame({"B": [10]}, index=pd.Index(["foo"]))
|
| 17 |
+
tm.assert_frame_equal(result, expected)
|
| 18 |
+
|
| 19 |
+
# test on same column with different methods
|
| 20 |
+
result = df.agg(foo=("B", "sum"), bar=("B", "min"))
|
| 21 |
+
expected = pd.DataFrame({"B": [10, 1]}, index=pd.Index(["foo", "bar"]))
|
| 22 |
+
|
| 23 |
+
tm.assert_frame_equal(result, expected)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def test_agg_relabel_multi_columns_multi_methods():
|
| 27 |
+
# GH 26513, test on multiple columns with multiple methods
|
| 28 |
+
df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
|
| 29 |
+
result = df.agg(
|
| 30 |
+
foo=("A", "sum"),
|
| 31 |
+
bar=("B", "mean"),
|
| 32 |
+
cat=("A", "min"),
|
| 33 |
+
dat=("B", "max"),
|
| 34 |
+
f=("A", "max"),
|
| 35 |
+
g=("C", "min"),
|
| 36 |
+
)
|
| 37 |
+
expected = pd.DataFrame(
|
| 38 |
+
{
|
| 39 |
+
"A": [6.0, np.nan, 1.0, np.nan, 2.0, np.nan],
|
| 40 |
+
"B": [np.nan, 2.5, np.nan, 4.0, np.nan, np.nan],
|
| 41 |
+
"C": [np.nan, np.nan, np.nan, np.nan, np.nan, 3.0],
|
| 42 |
+
},
|
| 43 |
+
index=pd.Index(["foo", "bar", "cat", "dat", "f", "g"]),
|
| 44 |
+
)
|
| 45 |
+
tm.assert_frame_equal(result, expected)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@pytest.mark.xfail(np_version_gte1p25, reason="name of min now equals name of np.min")
|
| 49 |
+
def test_agg_relabel_partial_functions():
|
| 50 |
+
# GH 26513, test on partial, functools or more complex cases
|
| 51 |
+
df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]})
|
| 52 |
+
msg = "using Series.[mean|min]"
|
| 53 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 54 |
+
result = df.agg(foo=("A", np.mean), bar=("A", "mean"), cat=("A", min))
|
| 55 |
+
expected = pd.DataFrame(
|
| 56 |
+
{"A": [1.5, 1.5, 1.0]}, index=pd.Index(["foo", "bar", "cat"])
|
| 57 |
+
)
|
| 58 |
+
tm.assert_frame_equal(result, expected)
|
| 59 |
+
|
| 60 |
+
msg = "using Series.[mean|min|max|sum]"
|
| 61 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 62 |
+
result = df.agg(
|
| 63 |
+
foo=("A", min),
|
| 64 |
+
bar=("A", np.min),
|
| 65 |
+
cat=("B", max),
|
| 66 |
+
dat=("C", "min"),
|
| 67 |
+
f=("B", np.sum),
|
| 68 |
+
kk=("B", lambda x: min(x)),
|
| 69 |
+
)
|
| 70 |
+
expected = pd.DataFrame(
|
| 71 |
+
{
|
| 72 |
+
"A": [1.0, 1.0, np.nan, np.nan, np.nan, np.nan],
|
| 73 |
+
"B": [np.nan, np.nan, 4.0, np.nan, 10.0, 1.0],
|
| 74 |
+
"C": [np.nan, np.nan, np.nan, 3.0, np.nan, np.nan],
|
| 75 |
+
},
|
| 76 |
+
index=pd.Index(["foo", "bar", "cat", "dat", "f", "kk"]),
|
| 77 |
+
)
|
| 78 |
+
tm.assert_frame_equal(result, expected)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def test_agg_namedtuple():
|
| 82 |
+
# GH 26513
|
| 83 |
+
df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})
|
| 84 |
+
result = df.agg(
|
| 85 |
+
foo=pd.NamedAgg("B", "sum"),
|
| 86 |
+
bar=pd.NamedAgg("B", "min"),
|
| 87 |
+
cat=pd.NamedAgg(column="B", aggfunc="count"),
|
| 88 |
+
fft=pd.NamedAgg("B", aggfunc="max"),
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
expected = pd.DataFrame(
|
| 92 |
+
{"B": [3, 1, 2, 2]}, index=pd.Index(["foo", "bar", "cat", "fft"])
|
| 93 |
+
)
|
| 94 |
+
tm.assert_frame_equal(result, expected)
|
| 95 |
+
|
| 96 |
+
result = df.agg(
|
| 97 |
+
foo=pd.NamedAgg("A", "min"),
|
| 98 |
+
bar=pd.NamedAgg(column="B", aggfunc="max"),
|
| 99 |
+
cat=pd.NamedAgg(column="A", aggfunc="max"),
|
| 100 |
+
)
|
| 101 |
+
expected = pd.DataFrame(
|
| 102 |
+
{"A": [0.0, np.nan, 1.0], "B": [np.nan, 2.0, np.nan]},
|
| 103 |
+
index=pd.Index(["foo", "bar", "cat"]),
|
| 104 |
+
)
|
| 105 |
+
tm.assert_frame_equal(result, expected)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def test_reconstruct_func():
|
| 109 |
+
# GH 28472, test to ensure reconstruct_func isn't moved;
|
| 110 |
+
# This method is used by other libraries (e.g. dask)
|
| 111 |
+
result = pd.core.apply.reconstruct_func("min")
|
| 112 |
+
expected = (False, "min", None, None)
|
| 113 |
+
tm.assert_equal(result, expected)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_frame_transform.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
DataFrame,
|
| 6 |
+
MultiIndex,
|
| 7 |
+
Series,
|
| 8 |
+
)
|
| 9 |
+
import pandas._testing as tm
|
| 10 |
+
from pandas.tests.apply.common import frame_transform_kernels
|
| 11 |
+
from pandas.tests.frame.common import zip_frames
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def unpack_obj(obj, klass, axis):
|
| 15 |
+
"""
|
| 16 |
+
Helper to ensure we have the right type of object for a test parametrized
|
| 17 |
+
over frame_or_series.
|
| 18 |
+
"""
|
| 19 |
+
if klass is not DataFrame:
|
| 20 |
+
obj = obj["A"]
|
| 21 |
+
if axis != 0:
|
| 22 |
+
pytest.skip(f"Test is only for DataFrame with axis={axis}")
|
| 23 |
+
return obj
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def test_transform_ufunc(axis, float_frame, frame_or_series):
|
| 27 |
+
# GH 35964
|
| 28 |
+
obj = unpack_obj(float_frame, frame_or_series, axis)
|
| 29 |
+
|
| 30 |
+
with np.errstate(all="ignore"):
|
| 31 |
+
f_sqrt = np.sqrt(obj)
|
| 32 |
+
|
| 33 |
+
# ufunc
|
| 34 |
+
result = obj.transform(np.sqrt, axis=axis)
|
| 35 |
+
expected = f_sqrt
|
| 36 |
+
tm.assert_equal(result, expected)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@pytest.mark.parametrize(
|
| 40 |
+
"ops, names",
|
| 41 |
+
[
|
| 42 |
+
([np.sqrt], ["sqrt"]),
|
| 43 |
+
([np.abs, np.sqrt], ["absolute", "sqrt"]),
|
| 44 |
+
(np.array([np.sqrt]), ["sqrt"]),
|
| 45 |
+
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
|
| 46 |
+
],
|
| 47 |
+
)
|
| 48 |
+
def test_transform_listlike(axis, float_frame, ops, names):
|
| 49 |
+
# GH 35964
|
| 50 |
+
other_axis = 1 if axis in {0, "index"} else 0
|
| 51 |
+
with np.errstate(all="ignore"):
|
| 52 |
+
expected = zip_frames([op(float_frame) for op in ops], axis=other_axis)
|
| 53 |
+
if axis in {0, "index"}:
|
| 54 |
+
expected.columns = MultiIndex.from_product([float_frame.columns, names])
|
| 55 |
+
else:
|
| 56 |
+
expected.index = MultiIndex.from_product([float_frame.index, names])
|
| 57 |
+
result = float_frame.transform(ops, axis=axis)
|
| 58 |
+
tm.assert_frame_equal(result, expected)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@pytest.mark.parametrize("ops", [[], np.array([])])
|
| 62 |
+
def test_transform_empty_listlike(float_frame, ops, frame_or_series):
|
| 63 |
+
obj = unpack_obj(float_frame, frame_or_series, 0)
|
| 64 |
+
|
| 65 |
+
with pytest.raises(ValueError, match="No transform functions were provided"):
|
| 66 |
+
obj.transform(ops)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def test_transform_listlike_func_with_args():
|
| 70 |
+
# GH 50624
|
| 71 |
+
df = DataFrame({"x": [1, 2, 3]})
|
| 72 |
+
|
| 73 |
+
def foo1(x, a=1, c=0):
|
| 74 |
+
return x + a + c
|
| 75 |
+
|
| 76 |
+
def foo2(x, b=2, c=0):
|
| 77 |
+
return x + b + c
|
| 78 |
+
|
| 79 |
+
msg = r"foo1\(\) got an unexpected keyword argument 'b'"
|
| 80 |
+
with pytest.raises(TypeError, match=msg):
|
| 81 |
+
df.transform([foo1, foo2], 0, 3, b=3, c=4)
|
| 82 |
+
|
| 83 |
+
result = df.transform([foo1, foo2], 0, 3, c=4)
|
| 84 |
+
expected = DataFrame(
|
| 85 |
+
[[8, 8], [9, 9], [10, 10]],
|
| 86 |
+
columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
|
| 87 |
+
)
|
| 88 |
+
tm.assert_frame_equal(result, expected)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@pytest.mark.parametrize("box", [dict, Series])
|
| 92 |
+
def test_transform_dictlike(axis, float_frame, box):
|
| 93 |
+
# GH 35964
|
| 94 |
+
if axis in (0, "index"):
|
| 95 |
+
e = float_frame.columns[0]
|
| 96 |
+
expected = float_frame[[e]].transform(np.abs)
|
| 97 |
+
else:
|
| 98 |
+
e = float_frame.index[0]
|
| 99 |
+
expected = float_frame.iloc[[0]].transform(np.abs)
|
| 100 |
+
result = float_frame.transform(box({e: np.abs}), axis=axis)
|
| 101 |
+
tm.assert_frame_equal(result, expected)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def test_transform_dictlike_mixed():
    # GH 40018: dict values may mix single functions and lists of functions.
    frame = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]})
    expected = DataFrame(
        [[1.0, 1, 1.0], [2.0, 4, 2.0]],
        columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),
    )
    tm.assert_frame_equal(
        frame.transform({"b": ["sqrt", "abs"], "c": "sqrt"}), expected
    )
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@pytest.mark.parametrize(
    "ops",
    [
        {},
        {"A": []},
        {"A": [], "B": "cumsum"},
        {"A": "cumsum", "B": []},
        {"A": [], "B": ["cumsum"]},
        {"A": ["cumsum"], "B": []},
    ],
)
def test_transform_empty_dictlike(float_frame, ops, frame_or_series):
    # Dict-like specs that include an empty function list must raise.
    target = unpack_obj(float_frame, frame_or_series, 0)

    with pytest.raises(ValueError, match="No transform functions were provided"):
        target.transform(ops)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_udf(axis, float_frame, use_apply, frame_or_series):
    # GH 35964: transform can call a UDF either via apply or with the whole
    # object; force one path to fail so the fallback path is exercised too.
    obj = unpack_obj(float_frame, frame_or_series, axis)

    def func(x):
        # transform is using apply iff x is not a DataFrame
        if use_apply == isinstance(x, frame_or_series):
            # Force transform to fall back to the other code path.
            raise ValueError
        return x + 1

    tm.assert_equal(obj.transform(func, axis=axis), obj + 1)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# Transform kernels that succeed even on an all-object frame; everything
# else in ``frame_transform_kernels`` is expected to raise on such input.
wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"]
frame_kernels_raise = [
    kernel for kernel in frame_transform_kernels if kernel not in wont_fail
]
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1])
def test_transform_bad_dtype(op, frame_or_series, request):
    # GH 35964: transforms over an all-object column raise TypeError,
    # whatever shape the spec takes (bare, list, dict, dict-of-list).
    if op == "ngroup":
        request.applymarker(
            pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
        )

    # DataFrame that will fail on most transforms
    obj = tm.get_obj(DataFrame({"A": 3 * [object]}), frame_or_series)
    msg = "|".join(
        [
            "not supported between instances of 'type' and 'type'",
            "unsupported operand type",
        ]
    )

    for spec in (op, [op], {"A": op}, {"A": [op]}):
        with pytest.raises(TypeError, match=msg):
            obj.transform(spec)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@pytest.mark.parametrize("op", frame_kernels_raise)
def test_transform_failure_typeerror(request, op):
    # GH 35964: a kernel failing on an object column propagates its
    # TypeError through every dict/list spec shape.
    if op == "ngroup":
        request.applymarker(
            pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
        )

    # Using object makes most transform kernels fail
    df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]})
    msg = "|".join(
        [
            "not supported between instances of 'type' and 'type'",
            "unsupported operand type",
        ]
    )

    specs = (
        [op],
        {"A": op, "B": op},
        {"A": [op], "B": [op]},
        {"A": [op, "shift"], "B": [op]},
    )
    for spec in specs:
        with pytest.raises(TypeError, match=msg):
            df.transform(spec)
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def test_transform_failure_valueerror():
    # GH 40211: a ValueError raised inside the UDF surfaces as
    # "Transform function failed" for every spec shape.
    def op(x):
        if np.sum(np.sum(x)) < 10:
            raise ValueError
        return x

    df = DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]})

    specs = (
        [op],
        {"A": op, "B": op},
        {"A": [op], "B": [op]},
        {"A": [op, "shift"], "B": [op]},
    )
    for spec in specs:
        with pytest.raises(ValueError, match="Transform function failed"):
            df.transform(spec)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_passes_args(use_apply, frame_or_series):
    # GH 35964: positional args and kwargs must reach the UDF on both the
    # apply path and the whole-object path.
    expected_args = [1, 2]
    expected_kwargs = {"c": 3}

    def f(x, a, b, c):
        # transform is using apply iff x is not a DataFrame
        if use_apply == isinstance(x, frame_or_series):
            # Force transform to fall back to the other code path.
            raise ValueError
        assert [a, b] == expected_args
        assert c == expected_kwargs["c"]
        return x

    frame_or_series([1]).transform(f, 0, *expected_args, **expected_kwargs)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def test_transform_empty_dataframe():
    # GH 39636: transforming an empty frame (or one of its columns)
    # round-trips to the same empty object.
    empty = DataFrame([], columns=["col1", "col2"])

    tm.assert_frame_equal(empty.transform(lambda x: x + 10), empty)
    tm.assert_series_equal(empty["col1"].transform(lambda x: x + 10), empty["col1"])
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_invalid_arg.py
ADDED
|
@@ -0,0 +1,361 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Tests specifically aimed at detecting bad arguments.
|
| 2 |
+
# This file is organized by reason for exception.
|
| 3 |
+
# 1. always invalid argument values
|
| 4 |
+
# 2. missing column(s)
|
| 5 |
+
# 3. incompatible ops/dtype/args/kwargs
|
| 6 |
+
# 4. invalid result shape/type
|
| 7 |
+
# If your test does not fit into one of these categories, add to this list.
|
| 8 |
+
|
| 9 |
+
from itertools import chain
|
| 10 |
+
import re
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
import pytest
|
| 14 |
+
|
| 15 |
+
from pandas.errors import SpecificationError
|
| 16 |
+
|
| 17 |
+
from pandas import (
|
| 18 |
+
DataFrame,
|
| 19 |
+
Series,
|
| 20 |
+
date_range,
|
| 21 |
+
)
|
| 22 |
+
import pandas._testing as tm
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@pytest.mark.parametrize("result_type", ["foo", 1])
def test_result_type_error(result_type):
    # Only None/'reduce'/'broadcast'/'expand' are allowed for result_type.
    df = DataFrame(
        np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
        columns=["A", "B", "C"],
    )

    expected_msg = (
        "invalid value for result_type, must be one of "
        "{None, 'reduce', 'broadcast', 'expand'}"
    )
    with pytest.raises(ValueError, match=expected_msg):
        df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_apply_invalid_axis_value():
    # Axis numbers other than 0/1 are rejected by DataFrame.apply.
    df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
    with pytest.raises(ValueError, match="No axis named 2 for object type DataFrame"):
        df.apply(lambda x: x, 2)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def test_agg_raises():
    # GH 26513: calling agg() with no function at all is a TypeError.
    df = DataFrame({"A": [0, 1], "B": [1, 2]})
    with pytest.raises(TypeError, match="Must provide"):
        df.agg()
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def test_map_with_invalid_na_action_raises():
    # GH 32815: na_action accepts only 'ignore' or None.
    ser = Series([1, 2, 3])
    with pytest.raises(ValueError, match="na_action must either be 'ignore' or None"):
        ser.map(lambda x: x, na_action="____")
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@pytest.mark.parametrize("input_na_action", ["____", True])
def test_map_arg_is_dict_with_invalid_na_action_raises(input_na_action):
    # GH 46588: the na_action check also fires when mapping with a dict.
    ser = Series([1, 2, 3])
    msg = f"na_action must either be 'ignore' or None, {input_na_action} was passed"
    with pytest.raises(ValueError, match=msg):
        ser.map({1: 2}, na_action=input_na_action)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("func", [{"A": {"B": "sum"}}, {"A": {"B": ["sum"]}}])
def test_nested_renamer(frame_or_series, method, func):
    # GH 35964: nested column -> {name: func} renaming specs are unsupported
    # for apply/agg/transform alike.
    obj = frame_or_series({"A": [1]})
    with pytest.raises(SpecificationError, match="nested renamer is not supported"):
        getattr(obj, method)(func)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@pytest.mark.parametrize(
    "renamer",
    [{"foo": ["min", "max"]}, {"foo": ["min", "max"], "bar": ["sum", "mean"]}],
)
def test_series_nested_renamer(renamer):
    # Dict-of-list renaming on Series.agg is a nested renamer and must raise.
    ser = Series(range(6), dtype="int64", name="series")
    with pytest.raises(SpecificationError, match="nested renamer is not supported"):
        ser.agg(renamer)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def test_apply_dict_depr():
    # Renaming via a dict of lists on a single column is a nested renamer.
    tsdf = DataFrame(
        np.random.default_rng(2).standard_normal((10, 3)),
        columns=["A", "B", "C"],
        index=date_range("1/1/2000", periods=10),
    )
    with pytest.raises(SpecificationError, match="nested renamer is not supported"):
        tsdf.A.agg({"foo": ["sum", "mean"]})
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@pytest.mark.parametrize("method", ["agg", "transform"])
def test_dict_nested_renaming_depr(method):
    df = DataFrame({"A": range(5), "B": 5})

    # nested renaming
    with pytest.raises(SpecificationError, match=r"nested renamer is not supported"):
        getattr(df, method)({"A": {"foo": "min"}, "B": {"bar": "max"}})
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}])
def test_missing_column(method, func):
    # GH 40004: referencing a column that does not exist raises KeyError.
    obj = DataFrame({"A": [1]})
    with pytest.raises(KeyError, match=re.escape("Column(s) ['B'] do not exist")):
        getattr(obj, method)(func)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def test_transform_mixed_column_name_dtypes():
    # GH 39025: missing labels of mixed dtypes are all reported together.
    df = DataFrame({"a": ["1"]})
    with pytest.raises(KeyError, match=r"Column\(s\) \[1, 'b'\] do not exist"):
        df.transform({"a": int, 1: str, "b": int})
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@pytest.mark.parametrize(
    "how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)]
)
def test_apply_str_axis_1_raises(how, args):
    # GH 39211 - some ops don't support axis=1
    df = DataFrame({"a": [1, 2], "b": [3, 4]})
    with pytest.raises(ValueError, match=f"Operation {how} does not support axis=1"):
        df.apply(how, axis=1, args=args)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def test_transform_axis_1_raises():
    # GH 35964: a Series has no axis 1.
    with pytest.raises(ValueError, match="No axis named 1 for object type Series"):
        Series([1]).transform("sum", axis=1)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def test_apply_modify_traceback():
    # Row-wise apply over a frame with a NaN injected into a string column:
    # the UDF calls ``.startswith`` on that NaN (a float), and the resulting
    # AttributeError must propagate out of DataFrame.apply unchanged.
    data = DataFrame(
        {
            "A": [
                "foo",
                "foo",
                "foo",
                "foo",
                "bar",
                "bar",
                "bar",
                "bar",
                "foo",
                "foo",
                "foo",
            ],
            "B": [
                "one",
                "one",
                "one",
                "two",
                "one",
                "one",
                "one",
                "two",
                "two",
                "two",
                "one",
            ],
            "C": [
                "dull",
                "dull",
                "shiny",
                "dull",
                "dull",
                "shiny",
                "shiny",
                "dull",
                "shiny",
                "shiny",
                "shiny",
            ],
            "D": np.random.default_rng(2).standard_normal(11),
            "E": np.random.default_rng(2).standard_normal(11),
            "F": np.random.default_rng(2).standard_normal(11),
        }
    )

    # Inject a missing value so the UDF hits a float where it expects a str.
    data.loc[4, "C"] = np.nan

    def transform(row):
        # Mutates qualifying rows in place; raises AttributeError on the
        # NaN row because float has no ``startswith``.
        if row["C"].startswith("shin") and row["A"] == "foo":
            row["D"] = 7
        return row

    msg = "'float' object has no attribute 'startswith'"
    with pytest.raises(AttributeError, match=msg):
        data.apply(transform, axis=1)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
@pytest.mark.parametrize(
    "df, func, expected",
    tm.get_cython_table_params(
        DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]]
    ),
)
def test_agg_cython_table_raises_frame(df, func, expected, axis, using_infer_string):
    # GH 21224: cython-table aggregations on string data raise TypeError
    # (or Arrow's error when the string dtype is arrow-backed).
    if using_infer_string:
        import pyarrow as pa

        # Arrow-backed strings may fail inside pyarrow instead.
        expected = (expected, pa.lib.ArrowNotImplementedError)

    msg = "can't multiply sequence by non-int of type 'str'|has no kernel"
    # Passing the numpy function (not the string name) triggers the
    # "is currently using" deprecation warning.
    warn = None if isinstance(func, str) else FutureWarning
    with pytest.raises(expected, match=msg):
        with tm.assert_produces_warning(warn, match="using DataFrame.cumprod"):
            df.agg(func, axis=axis)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
@pytest.mark.parametrize(
    "series, func, expected",
    chain(
        tm.get_cython_table_params(
            Series("a b c".split()),
            [
                ("mean", TypeError),  # mean raises TypeError
                ("prod", TypeError),
                ("std", TypeError),
                ("var", TypeError),
                ("median", TypeError),
                ("cumprod", TypeError),
            ],
        )
    ),
)
def test_agg_cython_table_raises_series(series, func, expected, using_infer_string):
    # GH21224: cython-table reductions on string data must raise.
    msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"
    # median reports a different message than the other reductions.
    if func == "median" or func is np.nanmedian or func is np.median:
        msg = r"Cannot convert \['a' 'b' 'c'\] to numeric"

    if using_infer_string:
        import pyarrow as pa

        # Arrow-backed strings may fail inside pyarrow instead.
        expected = (expected, pa.lib.ArrowNotImplementedError)

    msg = msg + "|does not support|has no kernel"
    # Passing the numpy function (not the string name) triggers the
    # "is currently using" deprecation warning.
    warn = None if isinstance(func, str) else FutureWarning

    with pytest.raises(expected, match=msg):
        # e.g. Series('a b'.split()).cumprod() will raise
        with tm.assert_produces_warning(warn, match="is currently using Series.*"):
            series.agg(func)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def test_agg_none_to_type():
    # GH 40543: int(None) inside the aggregator propagates its TypeError.
    df = DataFrame({"a": [None]})
    with pytest.raises(TypeError, match=re.escape("int() argument must be a string")):
        df.agg({"a": lambda x: int(x.iloc[0])})
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def test_transform_none_to_type():
    # GH#34377: same failure via transform().
    df = DataFrame({"a": [None]})
    with pytest.raises(TypeError, match="argument must be a"):
        df.transform({"a": lambda x: int(x.iloc[0])})
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.array([1, 2]).reshape(-1, 2),
        lambda x: [1, 2],
        lambda x: Series([1, 2]),
    ],
)
def test_apply_broadcast_error(func):
    # result_type="broadcast" requires results that fit the original shape.
    df = DataFrame(
        np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
        columns=["A", "B", "C"],
    )

    # > 1 ndim
    with pytest.raises(
        ValueError, match="too many dims to broadcast|cannot broadcast result"
    ):
        df.apply(func, axis=1, result_type="broadcast")
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def test_transform_and_agg_err_agg(axis, float_frame):
    # Mixing a transformer ("sqrt") with an aggregator ("max") must raise.
    with pytest.raises(
        ValueError, match="cannot combine transform and aggregation operations"
    ):
        with np.errstate(all="ignore"):
            float_frame.agg(["max", "sqrt"], axis=axis)
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
@pytest.mark.filterwarnings("ignore::FutureWarning")  # GH53325
@pytest.mark.parametrize(
    "func, msg",
    [
        (["sqrt", "max"], "cannot combine transform and aggregation"),
        (
            {"foo": np.sqrt, "bar": "sum"},
            "cannot perform both aggregation and transformation",
        ),
    ],
)
def test_transform_and_agg_err_series(string_series, func, msg):
    # Transform + aggregator mixes are rejected by Series.agg as well.
    with pytest.raises(ValueError, match=msg):
        with np.errstate(all="ignore"):
            string_series.agg(func)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]])
def test_transform_wont_agg_frame(axis, float_frame, func):
    # GH 35964: transform() refuses specs that aggregate.
    with pytest.raises(ValueError, match="Function did not transform"):
        float_frame.transform(func, axis=axis)
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
@pytest.mark.parametrize("func", [["min", "max"], ["sqrt", "max"]])
def test_transform_wont_agg_series(string_series, func):
    # GH 35964: transforming a Series with an aggregator must raise.
    with pytest.raises(ValueError, match="Function did not transform"):
        string_series.transform(func)
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
@pytest.mark.parametrize(
    "op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}]
)
def test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper):
    # GH 35964: every reduction, wrapped in any spec shape, fails to transform.
    op = op_wrapper(all_reductions)
    obj = tm.get_obj(DataFrame({"A": [1, 2, 3]}), frame_or_series)

    with pytest.raises(ValueError, match="Function did not transform"):
        obj.transform(op)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_numba.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas.util._test_decorators as td
|
| 5 |
+
|
| 6 |
+
from pandas import (
|
| 7 |
+
DataFrame,
|
| 8 |
+
Index,
|
| 9 |
+
)
|
| 10 |
+
import pandas._testing as tm
|
| 11 |
+
|
| 12 |
+
pytestmark = [td.skip_if_no("numba"), pytest.mark.single_cpu]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@pytest.fixture(params=[0, 1])
def apply_axis(request):
    # Run each numba-apply test along both axes.
    return request.param
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def test_numba_vs_python_noop(float_frame, apply_axis):
    # Identity UDF: the numba and python engines must agree exactly.
    identity = lambda x: x
    result = float_frame.apply(identity, engine="numba", axis=apply_axis)
    expected = float_frame.apply(identity, engine="python", axis=apply_axis)
    tm.assert_frame_equal(result, expected)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def test_numba_vs_python_string_index():
    # GH#56189: arrow-backed string index/columns must not break numba apply.
    pytest.importorskip("pyarrow")
    df = DataFrame(
        1,
        index=Index(["a", "b"], dtype="string[pyarrow_numpy]"),
        columns=Index(["x", "y"], dtype="string[pyarrow_numpy]"),
    )
    identity = lambda x: x
    tm.assert_frame_equal(
        df.apply(identity, engine="numba", axis=0),
        df.apply(identity, engine="python", axis=0),
        check_column_type=False,
        check_index_type=False,
    )
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def test_numba_vs_python_indexing():
    # Label-based indexing inside the UDF, both row-wise and column-wise.
    frame = DataFrame(
        {"a": [1, 2, 3], "b": [4, 5, 6], "c": [7.0, 8.0, 9.0]},
        index=Index(["A", "B", "C"]),
    )

    for func, axis in ((lambda x: x["c"], 1), (lambda x: x["A"], 0)):
        result = frame.apply(func, engine="numba", axis=axis)
        expected = frame.apply(func, engine="python", axis=axis)
        tm.assert_series_equal(result, expected)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@pytest.mark.parametrize(
    "reduction",
    [lambda x: x.mean(), lambda x: x.min(), lambda x: x.max(), lambda x: x.sum()],
)
def test_numba_vs_python_reductions(reduction, apply_axis):
    # Reductions must produce identical Series under both engines.
    df = DataFrame(np.ones((4, 4), dtype=np.float64))
    tm.assert_series_equal(
        df.apply(reduction, engine="numba", axis=apply_axis),
        df.apply(reduction, engine="python", axis=apply_axis),
    )
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@pytest.mark.parametrize("colnames", [[1, 2, 3], [1.0, 2.0, 3.0]])
def test_numba_numeric_colnames(colnames):
    # Numeric column names must lower properly and remain indexable in numba.
    df = DataFrame(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64), columns=colnames
    )
    first_col = colnames[0]
    get_first = lambda x: x[first_col]  # select the first column
    tm.assert_series_equal(
        df.apply(get_first, engine="numba", axis=1),
        df.apply(get_first, engine="python", axis=1),
    )
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def test_numba_parallel_unsupported(float_frame):
    # parallel=True is only supported with raw=True.
    with pytest.raises(
        NotImplementedError,
        match="Parallel apply is not supported when raw=False and engine='numba'",
    ):
        float_frame.apply(lambda x: x, engine="numba", engine_kwargs={"parallel": True})
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def test_numba_nonunique_unsupported(apply_axis):
    # Duplicate labels cannot be represented for the numba engine.
    df = DataFrame({"a": [1, 2]}, index=Index(["a", "a"]))
    with pytest.raises(
        NotImplementedError,
        match="The index/columns must be unique when raw=False and engine='numba'",
    ):
        df.apply(lambda x: x, engine="numba", axis=apply_axis)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def test_numba_unsupported_dtypes(apply_axis):
    identity = lambda x: x
    df = DataFrame({"a": [1, 2], "b": ["a", "b"], "c": [4, 5]})
    df["c"] = df["c"].astype("double[pyarrow]")

    # Non-numeric columns are rejected up front.
    with pytest.raises(
        ValueError,
        match="Column b must have a numeric dtype. Found 'object|string' instead",
    ):
        df.apply(identity, engine="numba", axis=apply_axis)

    # Extension-array-backed columns are rejected as well.
    with pytest.raises(
        ValueError,
        match="Column c is backed by an extension array, "
        "which is not supported by the numba engine.",
    ):
        df["c"].to_frame().apply(identity, engine="numba", axis=apply_axis)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply.py
ADDED
|
@@ -0,0 +1,701 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from pandas import (
|
| 6 |
+
DataFrame,
|
| 7 |
+
Index,
|
| 8 |
+
MultiIndex,
|
| 9 |
+
Series,
|
| 10 |
+
concat,
|
| 11 |
+
date_range,
|
| 12 |
+
timedelta_range,
|
| 13 |
+
)
|
| 14 |
+
import pandas._testing as tm
|
| 15 |
+
from pandas.tests.apply.common import series_transform_kernels
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@pytest.fixture(params=[False, "compat"])
|
| 19 |
+
def by_row(request):
|
| 20 |
+
return request.param
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def test_series_map_box_timedelta(by_row):
|
| 24 |
+
# GH#11349
|
| 25 |
+
ser = Series(timedelta_range("1 day 1 s", periods=3, freq="h"))
|
| 26 |
+
|
| 27 |
+
def f(x):
|
| 28 |
+
return x.total_seconds() if by_row else x.dt.total_seconds()
|
| 29 |
+
|
| 30 |
+
result = ser.apply(f, by_row=by_row)
|
| 31 |
+
|
| 32 |
+
expected = ser.map(lambda x: x.total_seconds())
|
| 33 |
+
tm.assert_series_equal(result, expected)
|
| 34 |
+
|
| 35 |
+
expected = Series([86401.0, 90001.0, 93601.0])
|
| 36 |
+
tm.assert_series_equal(result, expected)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def test_apply(datetime_series, by_row):
|
| 40 |
+
result = datetime_series.apply(np.sqrt, by_row=by_row)
|
| 41 |
+
with np.errstate(all="ignore"):
|
| 42 |
+
expected = np.sqrt(datetime_series)
|
| 43 |
+
tm.assert_series_equal(result, expected)
|
| 44 |
+
|
| 45 |
+
# element-wise apply (ufunc)
|
| 46 |
+
result = datetime_series.apply(np.exp, by_row=by_row)
|
| 47 |
+
expected = np.exp(datetime_series)
|
| 48 |
+
tm.assert_series_equal(result, expected)
|
| 49 |
+
|
| 50 |
+
# empty series
|
| 51 |
+
s = Series(dtype=object, name="foo", index=Index([], name="bar"))
|
| 52 |
+
rs = s.apply(lambda x: x, by_row=by_row)
|
| 53 |
+
tm.assert_series_equal(s, rs)
|
| 54 |
+
|
| 55 |
+
# check all metadata (GH 9322)
|
| 56 |
+
assert s is not rs
|
| 57 |
+
assert s.index is rs.index
|
| 58 |
+
assert s.dtype == rs.dtype
|
| 59 |
+
assert s.name == rs.name
|
| 60 |
+
|
| 61 |
+
# index but no data
|
| 62 |
+
s = Series(index=[1, 2, 3], dtype=np.float64)
|
| 63 |
+
rs = s.apply(lambda x: x, by_row=by_row)
|
| 64 |
+
tm.assert_series_equal(s, rs)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def test_apply_map_same_length_inference_bug():
|
| 68 |
+
s = Series([1, 2])
|
| 69 |
+
|
| 70 |
+
def f(x):
|
| 71 |
+
return (x, x + 1)
|
| 72 |
+
|
| 73 |
+
result = s.apply(f, by_row="compat")
|
| 74 |
+
expected = s.map(f)
|
| 75 |
+
tm.assert_series_equal(result, expected)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@pytest.mark.parametrize("convert_dtype", [True, False])
|
| 79 |
+
def test_apply_convert_dtype_deprecated(convert_dtype):
|
| 80 |
+
ser = Series(np.random.default_rng(2).standard_normal(10))
|
| 81 |
+
|
| 82 |
+
def func(x):
|
| 83 |
+
return x if x > 0 else np.nan
|
| 84 |
+
|
| 85 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 86 |
+
ser.apply(func, convert_dtype=convert_dtype, by_row="compat")
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def test_apply_args():
    # Positional ``args`` must be forwarded to the applied callable.
    s = Series(["foo,bar"])

    result = s.apply(str.split, args=(",",))

    # Check the element type before its value so a non-list result fails
    # with a clear type error rather than a confusing equality failure.
    assert isinstance(result[0], list)
    assert result[0] == ["foo", "bar"]
    # Also pin the whole result (values, index and name), not just element 0.
    tm.assert_series_equal(result, Series([["foo", "bar"]]))
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@pytest.mark.parametrize(
|
| 98 |
+
"args, kwargs, increment",
|
| 99 |
+
[((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)],
|
| 100 |
+
)
|
| 101 |
+
def test_agg_args(args, kwargs, increment):
|
| 102 |
+
# GH 43357
|
| 103 |
+
def f(x, a=0, b=0, c=0):
|
| 104 |
+
return x + a + 10 * b + 100 * c
|
| 105 |
+
|
| 106 |
+
s = Series([1, 2])
|
| 107 |
+
msg = (
|
| 108 |
+
"in Series.agg cannot aggregate and has been deprecated. "
|
| 109 |
+
"Use Series.transform to keep behavior unchanged."
|
| 110 |
+
)
|
| 111 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 112 |
+
result = s.agg(f, 0, *args, **kwargs)
|
| 113 |
+
expected = s + increment
|
| 114 |
+
tm.assert_series_equal(result, expected)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def test_agg_mapping_func_deprecated():
|
| 118 |
+
# GH 53325
|
| 119 |
+
s = Series([1, 2, 3])
|
| 120 |
+
|
| 121 |
+
def foo1(x, a=1, c=0):
|
| 122 |
+
return x + a + c
|
| 123 |
+
|
| 124 |
+
def foo2(x, b=2, c=0):
|
| 125 |
+
return x + b + c
|
| 126 |
+
|
| 127 |
+
msg = "using .+ in Series.agg cannot aggregate and"
|
| 128 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 129 |
+
s.agg(foo1, 0, 3, c=4)
|
| 130 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 131 |
+
s.agg([foo1, foo2], 0, 3, c=4)
|
| 132 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 133 |
+
s.agg({"a": foo1, "b": foo2}, 0, 3, c=4)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def test_series_apply_map_box_timestamps(by_row):
|
| 137 |
+
# GH#2689, GH#2627
|
| 138 |
+
ser = Series(date_range("1/1/2000", periods=10))
|
| 139 |
+
|
| 140 |
+
def func(x):
|
| 141 |
+
return (x.hour, x.day, x.month)
|
| 142 |
+
|
| 143 |
+
if not by_row:
|
| 144 |
+
msg = "Series' object has no attribute 'hour'"
|
| 145 |
+
with pytest.raises(AttributeError, match=msg):
|
| 146 |
+
ser.apply(func, by_row=by_row)
|
| 147 |
+
return
|
| 148 |
+
|
| 149 |
+
result = ser.apply(func, by_row=by_row)
|
| 150 |
+
expected = ser.map(func)
|
| 151 |
+
tm.assert_series_equal(result, expected)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def test_apply_box_dt64():
|
| 155 |
+
# ufunc will not be boxed. Same test cases as the test_map_box
|
| 156 |
+
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
|
| 157 |
+
ser = Series(vals, dtype="M8[ns]")
|
| 158 |
+
assert ser.dtype == "datetime64[ns]"
|
| 159 |
+
# boxed value must be Timestamp instance
|
| 160 |
+
res = ser.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat")
|
| 161 |
+
exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
|
| 162 |
+
tm.assert_series_equal(res, exp)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def test_apply_box_dt64tz():
|
| 166 |
+
vals = [
|
| 167 |
+
pd.Timestamp("2011-01-01", tz="US/Eastern"),
|
| 168 |
+
pd.Timestamp("2011-01-02", tz="US/Eastern"),
|
| 169 |
+
]
|
| 170 |
+
ser = Series(vals, dtype="M8[ns, US/Eastern]")
|
| 171 |
+
assert ser.dtype == "datetime64[ns, US/Eastern]"
|
| 172 |
+
res = ser.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat")
|
| 173 |
+
exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
|
| 174 |
+
tm.assert_series_equal(res, exp)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def test_apply_box_td64():
|
| 178 |
+
# timedelta
|
| 179 |
+
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
|
| 180 |
+
ser = Series(vals)
|
| 181 |
+
assert ser.dtype == "timedelta64[ns]"
|
| 182 |
+
res = ser.apply(lambda x: f"{type(x).__name__}_{x.days}", by_row="compat")
|
| 183 |
+
exp = Series(["Timedelta_1", "Timedelta_2"])
|
| 184 |
+
tm.assert_series_equal(res, exp)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def test_apply_box_period():
|
| 188 |
+
# period
|
| 189 |
+
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
|
| 190 |
+
ser = Series(vals)
|
| 191 |
+
assert ser.dtype == "Period[M]"
|
| 192 |
+
res = ser.apply(lambda x: f"{type(x).__name__}_{x.freqstr}", by_row="compat")
|
| 193 |
+
exp = Series(["Period_M", "Period_M"])
|
| 194 |
+
tm.assert_series_equal(res, exp)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def test_apply_datetimetz(by_row):
|
| 198 |
+
values = date_range("2011-01-01", "2011-01-02", freq="h").tz_localize("Asia/Tokyo")
|
| 199 |
+
s = Series(values, name="XX")
|
| 200 |
+
|
| 201 |
+
result = s.apply(lambda x: x + pd.offsets.Day(), by_row=by_row)
|
| 202 |
+
exp_values = date_range("2011-01-02", "2011-01-03", freq="h").tz_localize(
|
| 203 |
+
"Asia/Tokyo"
|
| 204 |
+
)
|
| 205 |
+
exp = Series(exp_values, name="XX")
|
| 206 |
+
tm.assert_series_equal(result, exp)
|
| 207 |
+
|
| 208 |
+
result = s.apply(lambda x: x.hour if by_row else x.dt.hour, by_row=by_row)
|
| 209 |
+
exp = Series(list(range(24)) + [0], name="XX", dtype="int64" if by_row else "int32")
|
| 210 |
+
tm.assert_series_equal(result, exp)
|
| 211 |
+
|
| 212 |
+
# not vectorized
|
| 213 |
+
def f(x):
|
| 214 |
+
return str(x.tz) if by_row else str(x.dt.tz)
|
| 215 |
+
|
| 216 |
+
result = s.apply(f, by_row=by_row)
|
| 217 |
+
if by_row:
|
| 218 |
+
exp = Series(["Asia/Tokyo"] * 25, name="XX")
|
| 219 |
+
tm.assert_series_equal(result, exp)
|
| 220 |
+
else:
|
| 221 |
+
assert result == "Asia/Tokyo"
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def test_apply_categorical(by_row, using_infer_string):
|
| 225 |
+
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
|
| 226 |
+
ser = Series(values, name="XX", index=list("abcdefg"))
|
| 227 |
+
|
| 228 |
+
if not by_row:
|
| 229 |
+
msg = "Series' object has no attribute 'lower"
|
| 230 |
+
with pytest.raises(AttributeError, match=msg):
|
| 231 |
+
ser.apply(lambda x: x.lower(), by_row=by_row)
|
| 232 |
+
assert ser.apply(lambda x: "A", by_row=by_row) == "A"
|
| 233 |
+
return
|
| 234 |
+
|
| 235 |
+
result = ser.apply(lambda x: x.lower(), by_row=by_row)
|
| 236 |
+
|
| 237 |
+
# should be categorical dtype when the number of categories are
|
| 238 |
+
# the same
|
| 239 |
+
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
|
| 240 |
+
exp = Series(values, name="XX", index=list("abcdefg"))
|
| 241 |
+
tm.assert_series_equal(result, exp)
|
| 242 |
+
tm.assert_categorical_equal(result.values, exp.values)
|
| 243 |
+
|
| 244 |
+
result = ser.apply(lambda x: "A")
|
| 245 |
+
exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
|
| 246 |
+
tm.assert_series_equal(result, exp)
|
| 247 |
+
assert result.dtype == object if not using_infer_string else "string[pyarrow_numpy]"
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
@pytest.mark.parametrize("series", [["1-1", "1-1", np.nan], ["1-1", "1-2", np.nan]])
|
| 251 |
+
def test_apply_categorical_with_nan_values(series, by_row):
|
| 252 |
+
# GH 20714 bug fixed in: GH 24275
|
| 253 |
+
s = Series(series, dtype="category")
|
| 254 |
+
if not by_row:
|
| 255 |
+
msg = "'Series' object has no attribute 'split'"
|
| 256 |
+
with pytest.raises(AttributeError, match=msg):
|
| 257 |
+
s.apply(lambda x: x.split("-")[0], by_row=by_row)
|
| 258 |
+
return
|
| 259 |
+
|
| 260 |
+
result = s.apply(lambda x: x.split("-")[0], by_row=by_row)
|
| 261 |
+
result = result.astype(object)
|
| 262 |
+
expected = Series(["1", "1", np.nan], dtype="category")
|
| 263 |
+
expected = expected.astype(object)
|
| 264 |
+
tm.assert_series_equal(result, expected)
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def test_apply_empty_integer_series_with_datetime_index(by_row):
|
| 268 |
+
# GH 21245
|
| 269 |
+
s = Series([], index=date_range(start="2018-01-01", periods=0), dtype=int)
|
| 270 |
+
result = s.apply(lambda x: x, by_row=by_row)
|
| 271 |
+
tm.assert_series_equal(result, s)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def test_apply_dataframe_iloc():
    # Applying a positional lookup into another frame must preserve that
    # frame's uint64 dtype in the result.
    numbers = DataFrame(np.uint64([1, 2, 3, 4, 5]), columns=["Numbers"])
    indices = DataFrame([2, 3, 2, 1, 2], columns=["Indices"])

    def retrieve(target_row, target_df):
        # Single positional lookup in the other frame.
        return target_df["Numbers"].iloc[target_row]

    result = indices["Indices"].apply(retrieve, args=(numbers,))
    expected = Series([3, 4, 3, 2, 3], name="Indices", dtype="uint64")
    tm.assert_series_equal(result, expected)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def test_transform(string_series, by_row):
|
| 288 |
+
# transforming functions
|
| 289 |
+
|
| 290 |
+
with np.errstate(all="ignore"):
|
| 291 |
+
f_sqrt = np.sqrt(string_series)
|
| 292 |
+
f_abs = np.abs(string_series)
|
| 293 |
+
|
| 294 |
+
# ufunc
|
| 295 |
+
result = string_series.apply(np.sqrt, by_row=by_row)
|
| 296 |
+
expected = f_sqrt.copy()
|
| 297 |
+
tm.assert_series_equal(result, expected)
|
| 298 |
+
|
| 299 |
+
# list-like
|
| 300 |
+
result = string_series.apply([np.sqrt], by_row=by_row)
|
| 301 |
+
expected = f_sqrt.to_frame().copy()
|
| 302 |
+
expected.columns = ["sqrt"]
|
| 303 |
+
tm.assert_frame_equal(result, expected)
|
| 304 |
+
|
| 305 |
+
result = string_series.apply(["sqrt"], by_row=by_row)
|
| 306 |
+
tm.assert_frame_equal(result, expected)
|
| 307 |
+
|
| 308 |
+
# multiple items in list
|
| 309 |
+
# these are in the order as if we are applying both functions per
|
| 310 |
+
# series and then concatting
|
| 311 |
+
expected = concat([f_sqrt, f_abs], axis=1)
|
| 312 |
+
expected.columns = ["sqrt", "absolute"]
|
| 313 |
+
result = string_series.apply([np.sqrt, np.abs], by_row=by_row)
|
| 314 |
+
tm.assert_frame_equal(result, expected)
|
| 315 |
+
|
| 316 |
+
# dict, provide renaming
|
| 317 |
+
expected = concat([f_sqrt, f_abs], axis=1)
|
| 318 |
+
expected.columns = ["foo", "bar"]
|
| 319 |
+
expected = expected.unstack().rename("series")
|
| 320 |
+
|
| 321 |
+
result = string_series.apply({"foo": np.sqrt, "bar": np.abs}, by_row=by_row)
|
| 322 |
+
tm.assert_series_equal(result.reindex_like(expected), expected)
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
@pytest.mark.parametrize("op", series_transform_kernels)
|
| 326 |
+
def test_transform_partial_failure(op, request):
|
| 327 |
+
# GH 35964
|
| 328 |
+
if op in ("ffill", "bfill", "pad", "backfill", "shift"):
|
| 329 |
+
request.applymarker(
|
| 330 |
+
pytest.mark.xfail(reason=f"{op} is successful on any dtype")
|
| 331 |
+
)
|
| 332 |
+
|
| 333 |
+
# Using object makes most transform kernels fail
|
| 334 |
+
ser = Series(3 * [object])
|
| 335 |
+
|
| 336 |
+
if op in ("fillna", "ngroup"):
|
| 337 |
+
error = ValueError
|
| 338 |
+
msg = "Transform function failed"
|
| 339 |
+
else:
|
| 340 |
+
error = TypeError
|
| 341 |
+
msg = "|".join(
|
| 342 |
+
[
|
| 343 |
+
"not supported between instances of 'type' and 'type'",
|
| 344 |
+
"unsupported operand type",
|
| 345 |
+
]
|
| 346 |
+
)
|
| 347 |
+
|
| 348 |
+
with pytest.raises(error, match=msg):
|
| 349 |
+
ser.transform([op, "shift"])
|
| 350 |
+
|
| 351 |
+
with pytest.raises(error, match=msg):
|
| 352 |
+
ser.transform({"A": op, "B": "shift"})
|
| 353 |
+
|
| 354 |
+
with pytest.raises(error, match=msg):
|
| 355 |
+
ser.transform({"A": [op], "B": ["shift"]})
|
| 356 |
+
|
| 357 |
+
with pytest.raises(error, match=msg):
|
| 358 |
+
ser.transform({"A": [op, "shift"], "B": [op]})
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def test_transform_partial_failure_valueerror():
|
| 362 |
+
# GH 40211
|
| 363 |
+
def noop(x):
|
| 364 |
+
return x
|
| 365 |
+
|
| 366 |
+
def raising_op(_):
|
| 367 |
+
raise ValueError
|
| 368 |
+
|
| 369 |
+
ser = Series(3 * [object])
|
| 370 |
+
msg = "Transform function failed"
|
| 371 |
+
|
| 372 |
+
with pytest.raises(ValueError, match=msg):
|
| 373 |
+
ser.transform([noop, raising_op])
|
| 374 |
+
|
| 375 |
+
with pytest.raises(ValueError, match=msg):
|
| 376 |
+
ser.transform({"A": raising_op, "B": noop})
|
| 377 |
+
|
| 378 |
+
with pytest.raises(ValueError, match=msg):
|
| 379 |
+
ser.transform({"A": [raising_op], "B": [noop]})
|
| 380 |
+
|
| 381 |
+
with pytest.raises(ValueError, match=msg):
|
| 382 |
+
ser.transform({"A": [noop, raising_op], "B": [noop]})
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def test_demo():
    # Demonstration of Series.agg with list-like and dict-like reducers.
    s = Series(range(6), dtype="int64", name="series")

    # A list of reducers yields a Series indexed by the function names.
    tm.assert_series_equal(
        s.agg(["min", "max"]),
        Series([0, 5], index=["min", "max"], name="series"),
    )

    # A dict renames the output entry after its key.
    tm.assert_series_equal(
        s.agg({"foo": "min"}),
        Series([0], index=["foo"], name="series"),
    )
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
@pytest.mark.parametrize("func", [str, lambda x: str(x)])
|
| 399 |
+
def test_apply_map_evaluate_lambdas_the_same(string_series, func, by_row):
|
| 400 |
+
# test that we are evaluating row-by-row first if by_row="compat"
|
| 401 |
+
# else vectorized evaluation
|
| 402 |
+
result = string_series.apply(func, by_row=by_row)
|
| 403 |
+
|
| 404 |
+
if by_row:
|
| 405 |
+
expected = string_series.map(func)
|
| 406 |
+
tm.assert_series_equal(result, expected)
|
| 407 |
+
else:
|
| 408 |
+
assert result == str(string_series)
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def test_agg_evaluate_lambdas(string_series):
|
| 412 |
+
# GH53325
|
| 413 |
+
# in the future, the result will be a Series class.
|
| 414 |
+
|
| 415 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 416 |
+
result = string_series.agg(lambda x: type(x))
|
| 417 |
+
assert isinstance(result, Series) and len(result) == len(string_series)
|
| 418 |
+
|
| 419 |
+
with tm.assert_produces_warning(FutureWarning):
|
| 420 |
+
result = string_series.agg(type)
|
| 421 |
+
assert isinstance(result, Series) and len(result) == len(string_series)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
@pytest.mark.parametrize("op_name", ["agg", "apply"])
|
| 425 |
+
def test_with_nested_series(datetime_series, op_name):
|
| 426 |
+
# GH 2316
|
| 427 |
+
# .agg with a reducer and a transform, what to do
|
| 428 |
+
msg = "cannot aggregate"
|
| 429 |
+
warning = FutureWarning if op_name == "agg" else None
|
| 430 |
+
with tm.assert_produces_warning(warning, match=msg):
|
| 431 |
+
# GH52123
|
| 432 |
+
result = getattr(datetime_series, op_name)(
|
| 433 |
+
lambda x: Series([x, x**2], index=["x", "x^2"])
|
| 434 |
+
)
|
| 435 |
+
expected = DataFrame({"x": datetime_series, "x^2": datetime_series**2})
|
| 436 |
+
tm.assert_frame_equal(result, expected)
|
| 437 |
+
|
| 438 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 439 |
+
result = datetime_series.agg(lambda x: Series([x, x**2], index=["x", "x^2"]))
|
| 440 |
+
tm.assert_frame_equal(result, expected)
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
def test_replicate_describe(string_series):
|
| 444 |
+
# this also tests a result set that is all scalars
|
| 445 |
+
expected = string_series.describe()
|
| 446 |
+
result = string_series.apply(
|
| 447 |
+
{
|
| 448 |
+
"count": "count",
|
| 449 |
+
"mean": "mean",
|
| 450 |
+
"std": "std",
|
| 451 |
+
"min": "min",
|
| 452 |
+
"25%": lambda x: x.quantile(0.25),
|
| 453 |
+
"50%": "median",
|
| 454 |
+
"75%": lambda x: x.quantile(0.75),
|
| 455 |
+
"max": "max",
|
| 456 |
+
},
|
| 457 |
+
)
|
| 458 |
+
tm.assert_series_equal(result, expected)
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def test_reduce(string_series):
|
| 462 |
+
# reductions with named functions
|
| 463 |
+
result = string_series.agg(["sum", "mean"])
|
| 464 |
+
expected = Series(
|
| 465 |
+
[string_series.sum(), string_series.mean()],
|
| 466 |
+
["sum", "mean"],
|
| 467 |
+
name=string_series.name,
|
| 468 |
+
)
|
| 469 |
+
tm.assert_series_equal(result, expected)
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
@pytest.mark.parametrize(
|
| 473 |
+
"how, kwds",
|
| 474 |
+
[("agg", {}), ("apply", {"by_row": "compat"}), ("apply", {"by_row": False})],
|
| 475 |
+
)
|
| 476 |
+
def test_non_callable_aggregates(how, kwds):
|
| 477 |
+
# test agg using non-callable series attributes
|
| 478 |
+
# GH 39116 - expand to apply
|
| 479 |
+
s = Series([1, 2, None])
|
| 480 |
+
|
| 481 |
+
# Calling agg w/ just a string arg same as calling s.arg
|
| 482 |
+
result = getattr(s, how)("size", **kwds)
|
| 483 |
+
expected = s.size
|
| 484 |
+
assert result == expected
|
| 485 |
+
|
| 486 |
+
# test when mixed w/ callable reducers
|
| 487 |
+
result = getattr(s, how)(["size", "count", "mean"], **kwds)
|
| 488 |
+
expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
|
| 489 |
+
tm.assert_series_equal(result, expected)
|
| 490 |
+
|
| 491 |
+
result = getattr(s, how)({"size": "size", "count": "count", "mean": "mean"}, **kwds)
|
| 492 |
+
tm.assert_series_equal(result, expected)
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def test_series_apply_no_suffix_index(by_row):
|
| 496 |
+
# GH36189
|
| 497 |
+
s = Series([4] * 3)
|
| 498 |
+
result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], by_row=by_row)
|
| 499 |
+
expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])
|
| 500 |
+
|
| 501 |
+
tm.assert_series_equal(result, expected)
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
@pytest.mark.parametrize(
|
| 505 |
+
"dti,exp",
|
| 506 |
+
[
|
| 507 |
+
(
|
| 508 |
+
Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
|
| 509 |
+
DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
|
| 510 |
+
),
|
| 511 |
+
(
|
| 512 |
+
Series(
|
| 513 |
+
np.arange(10, dtype=np.float64),
|
| 514 |
+
index=date_range("2020-01-01", periods=10),
|
| 515 |
+
name="ts",
|
| 516 |
+
),
|
| 517 |
+
DataFrame(np.repeat([[1, 2]], 10, axis=0), dtype="int64"),
|
| 518 |
+
),
|
| 519 |
+
],
|
| 520 |
+
)
|
| 521 |
+
@pytest.mark.parametrize("aware", [True, False])
|
| 522 |
+
def test_apply_series_on_date_time_index_aware_series(dti, exp, aware):
|
| 523 |
+
# GH 25959
|
| 524 |
+
# Calling apply on a localized time series should not cause an error
|
| 525 |
+
if aware:
|
| 526 |
+
index = dti.tz_localize("UTC").index
|
| 527 |
+
else:
|
| 528 |
+
index = dti.index
|
| 529 |
+
result = Series(index).apply(lambda x: Series([1, 2]))
|
| 530 |
+
tm.assert_frame_equal(result, exp)
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
@pytest.mark.parametrize(
|
| 534 |
+
"by_row, expected", [("compat", Series(np.ones(10), dtype="int64")), (False, 1)]
|
| 535 |
+
)
|
| 536 |
+
def test_apply_scalar_on_date_time_index_aware_series(by_row, expected):
|
| 537 |
+
# GH 25959
|
| 538 |
+
# Calling apply on a localized time series should not cause an error
|
| 539 |
+
series = Series(
|
| 540 |
+
np.arange(10, dtype=np.float64),
|
| 541 |
+
index=date_range("2020-01-01", periods=10, tz="UTC"),
|
| 542 |
+
)
|
| 543 |
+
result = Series(series.index).apply(lambda x: 1, by_row=by_row)
|
| 544 |
+
tm.assert_equal(result, expected)
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def test_apply_to_timedelta(by_row):
|
| 548 |
+
list_of_valid_strings = ["00:00:01", "00:00:02"]
|
| 549 |
+
a = pd.to_timedelta(list_of_valid_strings)
|
| 550 |
+
b = Series(list_of_valid_strings).apply(pd.to_timedelta, by_row=by_row)
|
| 551 |
+
tm.assert_series_equal(Series(a), b)
|
| 552 |
+
|
| 553 |
+
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
|
| 554 |
+
|
| 555 |
+
a = pd.to_timedelta(list_of_strings)
|
| 556 |
+
ser = Series(list_of_strings)
|
| 557 |
+
b = ser.apply(pd.to_timedelta, by_row=by_row)
|
| 558 |
+
tm.assert_series_equal(Series(a), b)
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
@pytest.mark.parametrize(
|
| 562 |
+
"ops, names",
|
| 563 |
+
[
|
| 564 |
+
([np.sum], ["sum"]),
|
| 565 |
+
([np.sum, np.mean], ["sum", "mean"]),
|
| 566 |
+
(np.array([np.sum]), ["sum"]),
|
| 567 |
+
(np.array([np.sum, np.mean]), ["sum", "mean"]),
|
| 568 |
+
],
|
| 569 |
+
)
|
| 570 |
+
@pytest.mark.parametrize(
|
| 571 |
+
"how, kwargs",
|
| 572 |
+
[["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]],
|
| 573 |
+
)
|
| 574 |
+
def test_apply_listlike_reducer(string_series, ops, names, how, kwargs):
|
| 575 |
+
# GH 39140
|
| 576 |
+
expected = Series({name: op(string_series) for name, op in zip(names, ops)})
|
| 577 |
+
expected.name = "series"
|
| 578 |
+
warn = FutureWarning if how == "agg" else None
|
| 579 |
+
msg = f"using Series.[{'|'.join(names)}]"
|
| 580 |
+
with tm.assert_produces_warning(warn, match=msg):
|
| 581 |
+
result = getattr(string_series, how)(ops, **kwargs)
|
| 582 |
+
tm.assert_series_equal(result, expected)
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
@pytest.mark.parametrize(
|
| 586 |
+
"ops",
|
| 587 |
+
[
|
| 588 |
+
{"A": np.sum},
|
| 589 |
+
{"A": np.sum, "B": np.mean},
|
| 590 |
+
Series({"A": np.sum}),
|
| 591 |
+
Series({"A": np.sum, "B": np.mean}),
|
| 592 |
+
],
|
| 593 |
+
)
|
| 594 |
+
@pytest.mark.parametrize(
|
| 595 |
+
"how, kwargs",
|
| 596 |
+
[["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]],
|
| 597 |
+
)
|
| 598 |
+
def test_apply_dictlike_reducer(string_series, ops, how, kwargs, by_row):
|
| 599 |
+
# GH 39140
|
| 600 |
+
expected = Series({name: op(string_series) for name, op in ops.items()})
|
| 601 |
+
expected.name = string_series.name
|
| 602 |
+
warn = FutureWarning if how == "agg" else None
|
| 603 |
+
msg = "using Series.[sum|mean]"
|
| 604 |
+
with tm.assert_produces_warning(warn, match=msg):
|
| 605 |
+
result = getattr(string_series, how)(ops, **kwargs)
|
| 606 |
+
tm.assert_series_equal(result, expected)
|
| 607 |
+
|
| 608 |
+
|
| 609 |
+
@pytest.mark.parametrize(
|
| 610 |
+
"ops, names",
|
| 611 |
+
[
|
| 612 |
+
([np.sqrt], ["sqrt"]),
|
| 613 |
+
([np.abs, np.sqrt], ["absolute", "sqrt"]),
|
| 614 |
+
(np.array([np.sqrt]), ["sqrt"]),
|
| 615 |
+
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
|
| 616 |
+
],
|
| 617 |
+
)
|
| 618 |
+
def test_apply_listlike_transformer(string_series, ops, names, by_row):
|
| 619 |
+
# GH 39140
|
| 620 |
+
with np.errstate(all="ignore"):
|
| 621 |
+
expected = concat([op(string_series) for op in ops], axis=1)
|
| 622 |
+
expected.columns = names
|
| 623 |
+
result = string_series.apply(ops, by_row=by_row)
|
| 624 |
+
tm.assert_frame_equal(result, expected)
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
@pytest.mark.parametrize(
|
| 628 |
+
"ops, expected",
|
| 629 |
+
[
|
| 630 |
+
([lambda x: x], DataFrame({"<lambda>": [1, 2, 3]})),
|
| 631 |
+
([lambda x: x.sum()], Series([6], index=["<lambda>"])),
|
| 632 |
+
],
|
| 633 |
+
)
|
| 634 |
+
def test_apply_listlike_lambda(ops, expected, by_row):
|
| 635 |
+
# GH53400
|
| 636 |
+
ser = Series([1, 2, 3])
|
| 637 |
+
result = ser.apply(ops, by_row=by_row)
|
| 638 |
+
tm.assert_equal(result, expected)
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
@pytest.mark.parametrize(
|
| 642 |
+
"ops",
|
| 643 |
+
[
|
| 644 |
+
{"A": np.sqrt},
|
| 645 |
+
{"A": np.sqrt, "B": np.exp},
|
| 646 |
+
Series({"A": np.sqrt}),
|
| 647 |
+
Series({"A": np.sqrt, "B": np.exp}),
|
| 648 |
+
],
|
| 649 |
+
)
|
| 650 |
+
def test_apply_dictlike_transformer(string_series, ops, by_row):
|
| 651 |
+
# GH 39140
|
| 652 |
+
with np.errstate(all="ignore"):
|
| 653 |
+
expected = concat({name: op(string_series) for name, op in ops.items()})
|
| 654 |
+
expected.name = string_series.name
|
| 655 |
+
result = string_series.apply(ops, by_row=by_row)
|
| 656 |
+
tm.assert_series_equal(result, expected)
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
@pytest.mark.parametrize(
|
| 660 |
+
"ops, expected",
|
| 661 |
+
[
|
| 662 |
+
(
|
| 663 |
+
{"a": lambda x: x},
|
| 664 |
+
Series([1, 2, 3], index=MultiIndex.from_arrays([["a"] * 3, range(3)])),
|
| 665 |
+
),
|
| 666 |
+
({"a": lambda x: x.sum()}, Series([6], index=["a"])),
|
| 667 |
+
],
|
| 668 |
+
)
|
| 669 |
+
def test_apply_dictlike_lambda(ops, by_row, expected):
|
| 670 |
+
# GH53400
|
| 671 |
+
ser = Series([1, 2, 3])
|
| 672 |
+
result = ser.apply(ops, by_row=by_row)
|
| 673 |
+
tm.assert_equal(result, expected)
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
def test_apply_retains_column_name(by_row):
|
| 677 |
+
# GH 16380
|
| 678 |
+
df = DataFrame({"x": range(3)}, Index(range(3), name="x"))
|
| 679 |
+
result = df.x.apply(lambda x: Series(range(x + 1), Index(range(x + 1), name="y")))
|
| 680 |
+
expected = DataFrame(
|
| 681 |
+
[[0.0, np.nan, np.nan], [0.0, 1.0, np.nan], [0.0, 1.0, 2.0]],
|
| 682 |
+
columns=Index(range(3), name="y"),
|
| 683 |
+
index=Index(range(3), name="x"),
|
| 684 |
+
)
|
| 685 |
+
tm.assert_frame_equal(result, expected)
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
def test_apply_type():
    # GH 46719: ``type`` itself can be applied; the classes land unchanged
    # in an object Series.
    idx = ["a", "b", "c"]
    ser = Series([3, "string", float], index=idx)
    tm.assert_series_equal(ser.apply(type), Series([int, str, type], index=idx))
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
def test_series_apply_unpack_nested_data():
    # GH#55189: ragged list elements expand into a frame padded with NaN.
    ser = Series([[1, 2, 3], [4, 5, 6, 7]])
    result = ser.apply(Series)
    expected = DataFrame(
        {0: [1.0, 4.0], 1: [2.0, 5.0], 2: [3.0, 6.0], 3: [np.nan, 7]}
    )
    tm.assert_frame_equal(result, expected)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply_relabeling.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import pandas._testing as tm
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def test_relabel_no_duplicated_method():
|
| 6 |
+
# this is to test there is no duplicated method used in agg
|
| 7 |
+
df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]})
|
| 8 |
+
|
| 9 |
+
result = df["A"].agg(foo="sum")
|
| 10 |
+
expected = df["A"].agg({"foo": "sum"})
|
| 11 |
+
tm.assert_series_equal(result, expected)
|
| 12 |
+
|
| 13 |
+
result = df["B"].agg(foo="min", bar="max")
|
| 14 |
+
expected = df["B"].agg({"foo": "min", "bar": "max"})
|
| 15 |
+
tm.assert_series_equal(result, expected)
|
| 16 |
+
|
| 17 |
+
msg = "using Series.[sum|min|max]"
|
| 18 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 19 |
+
result = df["B"].agg(foo=sum, bar=min, cat="max")
|
| 20 |
+
msg = "using Series.[sum|min|max]"
|
| 21 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 22 |
+
expected = df["B"].agg({"foo": sum, "bar": min, "cat": "max"})
|
| 23 |
+
tm.assert_series_equal(result, expected)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def test_relabel_duplicated_method():
|
| 27 |
+
# this is to test with nested renaming, duplicated method can be used
|
| 28 |
+
# if they are assigned with different new names
|
| 29 |
+
df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]})
|
| 30 |
+
|
| 31 |
+
result = df["A"].agg(foo="sum", bar="sum")
|
| 32 |
+
expected = pd.Series([6, 6], index=["foo", "bar"], name="A")
|
| 33 |
+
tm.assert_series_equal(result, expected)
|
| 34 |
+
|
| 35 |
+
msg = "using Series.min"
|
| 36 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 37 |
+
result = df["B"].agg(foo=min, bar="min")
|
| 38 |
+
expected = pd.Series([1, 1], index=["foo", "bar"], name="B")
|
| 39 |
+
tm.assert_series_equal(result, expected)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_series_transform.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas import (
|
| 5 |
+
DataFrame,
|
| 6 |
+
MultiIndex,
|
| 7 |
+
Series,
|
| 8 |
+
concat,
|
| 9 |
+
)
|
| 10 |
+
import pandas._testing as tm
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@pytest.mark.parametrize(
|
| 14 |
+
"args, kwargs, increment",
|
| 15 |
+
[((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)],
|
| 16 |
+
)
|
| 17 |
+
def test_agg_args(args, kwargs, increment):
|
| 18 |
+
# GH 43357
|
| 19 |
+
def f(x, a=0, b=0, c=0):
|
| 20 |
+
return x + a + 10 * b + 100 * c
|
| 21 |
+
|
| 22 |
+
s = Series([1, 2])
|
| 23 |
+
result = s.transform(f, 0, *args, **kwargs)
|
| 24 |
+
expected = s + increment
|
| 25 |
+
tm.assert_series_equal(result, expected)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@pytest.mark.parametrize(
|
| 29 |
+
"ops, names",
|
| 30 |
+
[
|
| 31 |
+
([np.sqrt], ["sqrt"]),
|
| 32 |
+
([np.abs, np.sqrt], ["absolute", "sqrt"]),
|
| 33 |
+
(np.array([np.sqrt]), ["sqrt"]),
|
| 34 |
+
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
|
| 35 |
+
],
|
| 36 |
+
)
|
| 37 |
+
def test_transform_listlike(string_series, ops, names):
|
| 38 |
+
# GH 35964
|
| 39 |
+
with np.errstate(all="ignore"):
|
| 40 |
+
expected = concat([op(string_series) for op in ops], axis=1)
|
| 41 |
+
expected.columns = names
|
| 42 |
+
result = string_series.transform(ops)
|
| 43 |
+
tm.assert_frame_equal(result, expected)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def test_transform_listlike_func_with_args():
    # GH 50624: extra positional/keyword arguments are forwarded to every
    # function in the list, so an argument one of them cannot accept raises.
    s = Series([1, 2, 3])

    def foo1(x, a=1, c=0):
        return x + a + c

    def foo2(x, b=2, c=0):
        return x + b + c

    # ``b`` is only understood by foo2, so passing it must fail on foo1.
    with pytest.raises(
        TypeError, match=r"foo1\(\) got an unexpected keyword argument 'b'"
    ):
        s.transform([foo1, foo2], 0, 3, b=3, c=4)

    result = s.transform([foo1, foo2], 0, 3, c=4)
    expected = DataFrame({"foo1": [8, 9, 10], "foo2": [8, 9, 10]})
    tm.assert_frame_equal(result, expected)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@pytest.mark.parametrize("box", [dict, Series])
|
| 67 |
+
def test_transform_dictlike(string_series, box):
|
| 68 |
+
# GH 35964
|
| 69 |
+
with np.errstate(all="ignore"):
|
| 70 |
+
expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1)
|
| 71 |
+
expected.columns = ["foo", "bar"]
|
| 72 |
+
result = string_series.transform(box({"foo": np.sqrt, "bar": np.abs}))
|
| 73 |
+
tm.assert_frame_equal(result, expected)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def test_transform_dictlike_mixed():
|
| 77 |
+
# GH 40018 - mix of lists and non-lists in values of a dictionary
|
| 78 |
+
df = Series([1, 4])
|
| 79 |
+
result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
|
| 80 |
+
expected = DataFrame(
|
| 81 |
+
[[1.0, 1, 1.0], [2.0, 4, 2.0]],
|
| 82 |
+
columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),
|
| 83 |
+
)
|
| 84 |
+
tm.assert_frame_equal(result, expected)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/apply/test_str.py
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import chain
|
| 2 |
+
import operator
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from pandas.core.dtypes.common import is_number
|
| 8 |
+
|
| 9 |
+
from pandas import (
|
| 10 |
+
DataFrame,
|
| 11 |
+
Series,
|
| 12 |
+
)
|
| 13 |
+
import pandas._testing as tm
|
| 14 |
+
from pandas.tests.apply.common import (
|
| 15 |
+
frame_transform_kernels,
|
| 16 |
+
series_transform_kernels,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"])
|
| 21 |
+
@pytest.mark.parametrize(
|
| 22 |
+
"args,kwds",
|
| 23 |
+
[
|
| 24 |
+
pytest.param([], {}, id="no_args_or_kwds"),
|
| 25 |
+
pytest.param([1], {}, id="axis_from_args"),
|
| 26 |
+
pytest.param([], {"axis": 1}, id="axis_from_kwds"),
|
| 27 |
+
pytest.param([], {"numeric_only": True}, id="optional_kwds"),
|
| 28 |
+
pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"),
|
| 29 |
+
],
|
| 30 |
+
)
|
| 31 |
+
@pytest.mark.parametrize("how", ["agg", "apply"])
|
| 32 |
+
def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how):
|
| 33 |
+
if len(args) > 1 and how == "agg":
|
| 34 |
+
request.applymarker(
|
| 35 |
+
pytest.mark.xfail(
|
| 36 |
+
raises=TypeError,
|
| 37 |
+
reason="agg/apply signature mismatch - agg passes 2nd "
|
| 38 |
+
"argument to func",
|
| 39 |
+
)
|
| 40 |
+
)
|
| 41 |
+
result = getattr(float_frame, how)(func, *args, **kwds)
|
| 42 |
+
expected = getattr(float_frame, func)(*args, **kwds)
|
| 43 |
+
tm.assert_series_equal(result, expected)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@pytest.mark.parametrize("arg", ["sum", "mean", "min", "max", "std"])
|
| 47 |
+
def test_with_string_args(datetime_series, arg):
|
| 48 |
+
result = datetime_series.apply(arg)
|
| 49 |
+
expected = getattr(datetime_series, arg)()
|
| 50 |
+
assert result == expected
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@pytest.mark.parametrize("op", ["mean", "median", "std", "var"])
|
| 54 |
+
@pytest.mark.parametrize("how", ["agg", "apply"])
|
| 55 |
+
def test_apply_np_reducer(op, how):
|
| 56 |
+
# GH 39116
|
| 57 |
+
float_frame = DataFrame({"a": [1, 2], "b": [3, 4]})
|
| 58 |
+
result = getattr(float_frame, how)(op)
|
| 59 |
+
# pandas ddof defaults to 1, numpy to 0
|
| 60 |
+
kwargs = {"ddof": 1} if op in ("std", "var") else {}
|
| 61 |
+
expected = Series(
|
| 62 |
+
getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns
|
| 63 |
+
)
|
| 64 |
+
tm.assert_series_equal(result, expected)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@pytest.mark.parametrize(
|
| 68 |
+
"op", ["abs", "ceil", "cos", "cumsum", "exp", "log", "sqrt", "square"]
|
| 69 |
+
)
|
| 70 |
+
@pytest.mark.parametrize("how", ["transform", "apply"])
|
| 71 |
+
def test_apply_np_transformer(float_frame, op, how):
|
| 72 |
+
# GH 39116
|
| 73 |
+
|
| 74 |
+
# float_frame will _usually_ have negative values, which will
|
| 75 |
+
# trigger the warning here, but let's put one in just to be sure
|
| 76 |
+
float_frame.iloc[0, 0] = -1.0
|
| 77 |
+
warn = None
|
| 78 |
+
if op in ["log", "sqrt"]:
|
| 79 |
+
warn = RuntimeWarning
|
| 80 |
+
|
| 81 |
+
with tm.assert_produces_warning(warn, check_stacklevel=False):
|
| 82 |
+
# float_frame fixture is defined in conftest.py, so we don't check the
|
| 83 |
+
# stacklevel as otherwise the test would fail.
|
| 84 |
+
result = getattr(float_frame, how)(op)
|
| 85 |
+
expected = getattr(np, op)(float_frame)
|
| 86 |
+
tm.assert_frame_equal(result, expected)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@pytest.mark.parametrize(
|
| 90 |
+
"series, func, expected",
|
| 91 |
+
chain(
|
| 92 |
+
tm.get_cython_table_params(
|
| 93 |
+
Series(dtype=np.float64),
|
| 94 |
+
[
|
| 95 |
+
("sum", 0),
|
| 96 |
+
("max", np.nan),
|
| 97 |
+
("min", np.nan),
|
| 98 |
+
("all", True),
|
| 99 |
+
("any", False),
|
| 100 |
+
("mean", np.nan),
|
| 101 |
+
("prod", 1),
|
| 102 |
+
("std", np.nan),
|
| 103 |
+
("var", np.nan),
|
| 104 |
+
("median", np.nan),
|
| 105 |
+
],
|
| 106 |
+
),
|
| 107 |
+
tm.get_cython_table_params(
|
| 108 |
+
Series([np.nan, 1, 2, 3]),
|
| 109 |
+
[
|
| 110 |
+
("sum", 6),
|
| 111 |
+
("max", 3),
|
| 112 |
+
("min", 1),
|
| 113 |
+
("all", True),
|
| 114 |
+
("any", True),
|
| 115 |
+
("mean", 2),
|
| 116 |
+
("prod", 6),
|
| 117 |
+
("std", 1),
|
| 118 |
+
("var", 1),
|
| 119 |
+
("median", 2),
|
| 120 |
+
],
|
| 121 |
+
),
|
| 122 |
+
tm.get_cython_table_params(
|
| 123 |
+
Series("a b c".split()),
|
| 124 |
+
[
|
| 125 |
+
("sum", "abc"),
|
| 126 |
+
("max", "c"),
|
| 127 |
+
("min", "a"),
|
| 128 |
+
("all", True),
|
| 129 |
+
("any", True),
|
| 130 |
+
],
|
| 131 |
+
),
|
| 132 |
+
),
|
| 133 |
+
)
|
| 134 |
+
def test_agg_cython_table_series(series, func, expected):
|
| 135 |
+
# GH21224
|
| 136 |
+
# test reducing functions in
|
| 137 |
+
# pandas.core.base.SelectionMixin._cython_table
|
| 138 |
+
warn = None if isinstance(func, str) else FutureWarning
|
| 139 |
+
with tm.assert_produces_warning(warn, match="is currently using Series.*"):
|
| 140 |
+
result = series.agg(func)
|
| 141 |
+
if is_number(expected):
|
| 142 |
+
assert np.isclose(result, expected, equal_nan=True)
|
| 143 |
+
else:
|
| 144 |
+
assert result == expected
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
@pytest.mark.parametrize(
|
| 148 |
+
"series, func, expected",
|
| 149 |
+
chain(
|
| 150 |
+
tm.get_cython_table_params(
|
| 151 |
+
Series(dtype=np.float64),
|
| 152 |
+
[
|
| 153 |
+
("cumprod", Series([], dtype=np.float64)),
|
| 154 |
+
("cumsum", Series([], dtype=np.float64)),
|
| 155 |
+
],
|
| 156 |
+
),
|
| 157 |
+
tm.get_cython_table_params(
|
| 158 |
+
Series([np.nan, 1, 2, 3]),
|
| 159 |
+
[
|
| 160 |
+
("cumprod", Series([np.nan, 1, 2, 6])),
|
| 161 |
+
("cumsum", Series([np.nan, 1, 3, 6])),
|
| 162 |
+
],
|
| 163 |
+
),
|
| 164 |
+
tm.get_cython_table_params(
|
| 165 |
+
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
|
| 166 |
+
),
|
| 167 |
+
),
|
| 168 |
+
)
|
| 169 |
+
def test_agg_cython_table_transform_series(series, func, expected):
|
| 170 |
+
# GH21224
|
| 171 |
+
# test transforming functions in
|
| 172 |
+
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
|
| 173 |
+
warn = None if isinstance(func, str) else FutureWarning
|
| 174 |
+
with tm.assert_produces_warning(warn, match="is currently using Series.*"):
|
| 175 |
+
result = series.agg(func)
|
| 176 |
+
tm.assert_series_equal(result, expected)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
@pytest.mark.parametrize(
|
| 180 |
+
"df, func, expected",
|
| 181 |
+
chain(
|
| 182 |
+
tm.get_cython_table_params(
|
| 183 |
+
DataFrame(),
|
| 184 |
+
[
|
| 185 |
+
("sum", Series(dtype="float64")),
|
| 186 |
+
("max", Series(dtype="float64")),
|
| 187 |
+
("min", Series(dtype="float64")),
|
| 188 |
+
("all", Series(dtype=bool)),
|
| 189 |
+
("any", Series(dtype=bool)),
|
| 190 |
+
("mean", Series(dtype="float64")),
|
| 191 |
+
("prod", Series(dtype="float64")),
|
| 192 |
+
("std", Series(dtype="float64")),
|
| 193 |
+
("var", Series(dtype="float64")),
|
| 194 |
+
("median", Series(dtype="float64")),
|
| 195 |
+
],
|
| 196 |
+
),
|
| 197 |
+
tm.get_cython_table_params(
|
| 198 |
+
DataFrame([[np.nan, 1], [1, 2]]),
|
| 199 |
+
[
|
| 200 |
+
("sum", Series([1.0, 3])),
|
| 201 |
+
("max", Series([1.0, 2])),
|
| 202 |
+
("min", Series([1.0, 1])),
|
| 203 |
+
("all", Series([True, True])),
|
| 204 |
+
("any", Series([True, True])),
|
| 205 |
+
("mean", Series([1, 1.5])),
|
| 206 |
+
("prod", Series([1.0, 2])),
|
| 207 |
+
("std", Series([np.nan, 0.707107])),
|
| 208 |
+
("var", Series([np.nan, 0.5])),
|
| 209 |
+
("median", Series([1, 1.5])),
|
| 210 |
+
],
|
| 211 |
+
),
|
| 212 |
+
),
|
| 213 |
+
)
|
| 214 |
+
def test_agg_cython_table_frame(df, func, expected, axis):
|
| 215 |
+
# GH 21224
|
| 216 |
+
# test reducing functions in
|
| 217 |
+
# pandas.core.base.SelectionMixin._cython_table
|
| 218 |
+
warn = None if isinstance(func, str) else FutureWarning
|
| 219 |
+
with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"):
|
| 220 |
+
# GH#53425
|
| 221 |
+
result = df.agg(func, axis=axis)
|
| 222 |
+
tm.assert_series_equal(result, expected)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@pytest.mark.parametrize(
|
| 226 |
+
"df, func, expected",
|
| 227 |
+
chain(
|
| 228 |
+
tm.get_cython_table_params(
|
| 229 |
+
DataFrame(), [("cumprod", DataFrame()), ("cumsum", DataFrame())]
|
| 230 |
+
),
|
| 231 |
+
tm.get_cython_table_params(
|
| 232 |
+
DataFrame([[np.nan, 1], [1, 2]]),
|
| 233 |
+
[
|
| 234 |
+
("cumprod", DataFrame([[np.nan, 1], [1, 2]])),
|
| 235 |
+
("cumsum", DataFrame([[np.nan, 1], [1, 3]])),
|
| 236 |
+
],
|
| 237 |
+
),
|
| 238 |
+
),
|
| 239 |
+
)
|
| 240 |
+
def test_agg_cython_table_transform_frame(df, func, expected, axis):
|
| 241 |
+
# GH 21224
|
| 242 |
+
# test transforming functions in
|
| 243 |
+
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
|
| 244 |
+
if axis in ("columns", 1):
|
| 245 |
+
# operating blockwise doesn't let us preserve dtypes
|
| 246 |
+
expected = expected.astype("float64")
|
| 247 |
+
|
| 248 |
+
warn = None if isinstance(func, str) else FutureWarning
|
| 249 |
+
with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"):
|
| 250 |
+
# GH#53425
|
| 251 |
+
result = df.agg(func, axis=axis)
|
| 252 |
+
tm.assert_frame_equal(result, expected)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
@pytest.mark.parametrize("op", series_transform_kernels)
|
| 256 |
+
def test_transform_groupby_kernel_series(request, string_series, op):
|
| 257 |
+
# GH 35964
|
| 258 |
+
if op == "ngroup":
|
| 259 |
+
request.applymarker(
|
| 260 |
+
pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
|
| 261 |
+
)
|
| 262 |
+
args = [0.0] if op == "fillna" else []
|
| 263 |
+
ones = np.ones(string_series.shape[0])
|
| 264 |
+
|
| 265 |
+
warn = FutureWarning if op == "fillna" else None
|
| 266 |
+
msg = "SeriesGroupBy.fillna is deprecated"
|
| 267 |
+
with tm.assert_produces_warning(warn, match=msg):
|
| 268 |
+
expected = string_series.groupby(ones).transform(op, *args)
|
| 269 |
+
result = string_series.transform(op, 0, *args)
|
| 270 |
+
tm.assert_series_equal(result, expected)
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
@pytest.mark.parametrize("op", frame_transform_kernels)
|
| 274 |
+
def test_transform_groupby_kernel_frame(request, axis, float_frame, op):
|
| 275 |
+
if op == "ngroup":
|
| 276 |
+
request.applymarker(
|
| 277 |
+
pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
|
| 278 |
+
)
|
| 279 |
+
|
| 280 |
+
# GH 35964
|
| 281 |
+
|
| 282 |
+
args = [0.0] if op == "fillna" else []
|
| 283 |
+
if axis in (0, "index"):
|
| 284 |
+
ones = np.ones(float_frame.shape[0])
|
| 285 |
+
msg = "The 'axis' keyword in DataFrame.groupby is deprecated"
|
| 286 |
+
else:
|
| 287 |
+
ones = np.ones(float_frame.shape[1])
|
| 288 |
+
msg = "DataFrame.groupby with axis=1 is deprecated"
|
| 289 |
+
|
| 290 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 291 |
+
gb = float_frame.groupby(ones, axis=axis)
|
| 292 |
+
|
| 293 |
+
warn = FutureWarning if op == "fillna" else None
|
| 294 |
+
op_msg = "DataFrameGroupBy.fillna is deprecated"
|
| 295 |
+
with tm.assert_produces_warning(warn, match=op_msg):
|
| 296 |
+
expected = gb.transform(op, *args)
|
| 297 |
+
|
| 298 |
+
result = float_frame.transform(op, axis, *args)
|
| 299 |
+
tm.assert_frame_equal(result, expected)
|
| 300 |
+
|
| 301 |
+
# same thing, but ensuring we have multiple blocks
|
| 302 |
+
assert "E" not in float_frame.columns
|
| 303 |
+
float_frame["E"] = float_frame["A"].copy()
|
| 304 |
+
assert len(float_frame._mgr.arrays) > 1
|
| 305 |
+
|
| 306 |
+
if axis in (0, "index"):
|
| 307 |
+
ones = np.ones(float_frame.shape[0])
|
| 308 |
+
else:
|
| 309 |
+
ones = np.ones(float_frame.shape[1])
|
| 310 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 311 |
+
gb2 = float_frame.groupby(ones, axis=axis)
|
| 312 |
+
warn = FutureWarning if op == "fillna" else None
|
| 313 |
+
op_msg = "DataFrameGroupBy.fillna is deprecated"
|
| 314 |
+
with tm.assert_produces_warning(warn, match=op_msg):
|
| 315 |
+
expected2 = gb2.transform(op, *args)
|
| 316 |
+
result2 = float_frame.transform(op, axis, *args)
|
| 317 |
+
tm.assert_frame_equal(result2, expected2)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"])
|
| 321 |
+
def test_transform_method_name(method):
|
| 322 |
+
# GH 19760
|
| 323 |
+
df = DataFrame({"A": [-1, 2]})
|
| 324 |
+
result = df.transform(method)
|
| 325 |
+
expected = operator.methodcaller(method)(df)
|
| 326 |
+
tm.assert_frame_equal(result, expected)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (187 Bytes). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/conftest.cpython-310.pyc
ADDED
|
Binary file (6.09 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_arrow.cpython-310.pyc
ADDED
|
Binary file (90.3 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_categorical.cpython-310.pyc
ADDED
|
Binary file (7.3 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_common.cpython-310.pyc
ADDED
|
Binary file (3.78 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_datetime.cpython-310.pyc
ADDED
|
Binary file (5.46 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_extension.cpython-310.pyc
ADDED
|
Binary file (1.25 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_interval.cpython-310.pyc
ADDED
|
Binary file (3.86 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_masked.cpython-310.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_numpy.cpython-310.pyc
ADDED
|
Binary file (15.1 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_period.cpython-310.pyc
ADDED
|
Binary file (4.65 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_sparse.cpython-310.pyc
ADDED
|
Binary file (17.1 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_string.cpython-310.pyc
ADDED
|
Binary file (7.9 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pandas.tests.extension.array_with_attr.array import (
|
| 2 |
+
FloatAttrArray,
|
| 3 |
+
FloatAttrDtype,
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
__all__ = ["FloatAttrArray", "FloatAttrDtype"]
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (340 Bytes). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/array.cpython-310.pyc
ADDED
|
Binary file (3.47 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/test_array_with_attr.cpython-310.pyc
ADDED
|
Binary file (1.21 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/array.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test extension array that has custom attribute information (not stored on the dtype).
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import numbers
|
| 8 |
+
from typing import TYPE_CHECKING
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
from pandas.core.dtypes.base import ExtensionDtype
|
| 13 |
+
|
| 14 |
+
import pandas as pd
|
| 15 |
+
from pandas.core.arrays import ExtensionArray
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from pandas._typing import type_t
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class FloatAttrDtype(ExtensionDtype):
|
| 22 |
+
type = float
|
| 23 |
+
name = "float_attr"
|
| 24 |
+
na_value = np.nan
|
| 25 |
+
|
| 26 |
+
@classmethod
|
| 27 |
+
def construct_array_type(cls) -> type_t[FloatAttrArray]:
|
| 28 |
+
"""
|
| 29 |
+
Return the array type associated with this dtype.
|
| 30 |
+
|
| 31 |
+
Returns
|
| 32 |
+
-------
|
| 33 |
+
type
|
| 34 |
+
"""
|
| 35 |
+
return FloatAttrArray
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class FloatAttrArray(ExtensionArray):
|
| 39 |
+
dtype = FloatAttrDtype()
|
| 40 |
+
__array_priority__ = 1000
|
| 41 |
+
|
| 42 |
+
def __init__(self, values, attr=None) -> None:
|
| 43 |
+
if not isinstance(values, np.ndarray):
|
| 44 |
+
raise TypeError("Need to pass a numpy array of float64 dtype as values")
|
| 45 |
+
if not values.dtype == "float64":
|
| 46 |
+
raise TypeError("Need to pass a numpy array of float64 dtype as values")
|
| 47 |
+
self.data = values
|
| 48 |
+
self.attr = attr
|
| 49 |
+
|
| 50 |
+
@classmethod
|
| 51 |
+
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
|
| 52 |
+
if not copy:
|
| 53 |
+
data = np.asarray(scalars, dtype="float64")
|
| 54 |
+
else:
|
| 55 |
+
data = np.array(scalars, dtype="float64", copy=copy)
|
| 56 |
+
return cls(data)
|
| 57 |
+
|
| 58 |
+
def __getitem__(self, item):
|
| 59 |
+
if isinstance(item, numbers.Integral):
|
| 60 |
+
return self.data[item]
|
| 61 |
+
else:
|
| 62 |
+
# slice, list-like, mask
|
| 63 |
+
item = pd.api.indexers.check_array_indexer(self, item)
|
| 64 |
+
return type(self)(self.data[item], self.attr)
|
| 65 |
+
|
| 66 |
+
def __len__(self) -> int:
|
| 67 |
+
return len(self.data)
|
| 68 |
+
|
| 69 |
+
def isna(self):
|
| 70 |
+
return np.isnan(self.data)
|
| 71 |
+
|
| 72 |
+
def take(self, indexer, allow_fill=False, fill_value=None):
|
| 73 |
+
from pandas.api.extensions import take
|
| 74 |
+
|
| 75 |
+
data = self.data
|
| 76 |
+
if allow_fill and fill_value is None:
|
| 77 |
+
fill_value = self.dtype.na_value
|
| 78 |
+
|
| 79 |
+
result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
|
| 80 |
+
return type(self)(result, self.attr)
|
| 81 |
+
|
| 82 |
+
def copy(self):
|
| 83 |
+
return type(self)(self.data.copy(), self.attr)
|
| 84 |
+
|
| 85 |
+
@classmethod
|
| 86 |
+
def _concat_same_type(cls, to_concat):
|
| 87 |
+
data = np.concatenate([x.data for x in to_concat])
|
| 88 |
+
attr = to_concat[0].attr if len(to_concat) else None
|
| 89 |
+
return cls(data, attr)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import pandas._testing as tm
|
| 5 |
+
from pandas.tests.extension.array_with_attr import FloatAttrArray
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def test_concat_with_all_na():
|
| 9 |
+
# https://github.com/pandas-dev/pandas/pull/47762
|
| 10 |
+
# ensure that attribute of the column array is preserved (when it gets
|
| 11 |
+
# preserved in reindexing the array) during merge/concat
|
| 12 |
+
arr = FloatAttrArray(np.array([np.nan, np.nan], dtype="float64"), attr="test")
|
| 13 |
+
|
| 14 |
+
df1 = pd.DataFrame({"col": arr, "key": [0, 1]})
|
| 15 |
+
df2 = pd.DataFrame({"key": [0, 1], "col2": [1, 2]})
|
| 16 |
+
result = pd.merge(df1, df2, on="key")
|
| 17 |
+
expected = pd.DataFrame({"col": arr, "key": [0, 1], "col2": [1, 2]})
|
| 18 |
+
tm.assert_frame_equal(result, expected)
|
| 19 |
+
assert result["col"].array.attr == "test"
|
| 20 |
+
|
| 21 |
+
df1 = pd.DataFrame({"col": arr, "key": [0, 1]})
|
| 22 |
+
df2 = pd.DataFrame({"key": [0, 2], "col2": [1, 2]})
|
| 23 |
+
result = pd.merge(df1, df2, on="key")
|
| 24 |
+
expected = pd.DataFrame({"col": arr.take([0]), "key": [0], "col2": [1]})
|
| 25 |
+
tm.assert_frame_equal(result, expected)
|
| 26 |
+
assert result["col"].array.attr == "test"
|
| 27 |
+
|
| 28 |
+
result = pd.concat([df1.set_index("key"), df2.set_index("key")], axis=1)
|
| 29 |
+
expected = pd.DataFrame(
|
| 30 |
+
{"col": arr.take([0, 1, -1]), "col2": [1, np.nan, 2], "key": [0, 1, 2]}
|
| 31 |
+
).set_index("key")
|
| 32 |
+
tm.assert_frame_equal(result, expected)
|
| 33 |
+
assert result["col"].array.attr == "test"
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__init__.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Base test suite for extension arrays.
|
| 3 |
+
|
| 4 |
+
These tests are intended for third-party libraries to subclass to validate
|
| 5 |
+
that their extension arrays and dtypes satisfy the interface. Moving or
|
| 6 |
+
renaming the tests should not be done lightly.
|
| 7 |
+
|
| 8 |
+
Libraries are expected to implement a few pytest fixtures to provide data
|
| 9 |
+
for the tests. The fixtures may be located in either
|
| 10 |
+
|
| 11 |
+
* The same module as your test class.
|
| 12 |
+
* A ``conftest.py`` in the same directory as your test class.
|
| 13 |
+
|
| 14 |
+
The full list of fixtures may be found in the ``conftest.py`` next to this
|
| 15 |
+
file.
|
| 16 |
+
|
| 17 |
+
.. code-block:: python
|
| 18 |
+
|
| 19 |
+
import pytest
|
| 20 |
+
from pandas.tests.extension.base import BaseDtypeTests
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@pytest.fixture
|
| 24 |
+
def dtype():
|
| 25 |
+
return MyDtype()
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class TestMyDtype(BaseDtypeTests):
|
| 29 |
+
pass
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
Your class ``TestDtype`` will inherit all the tests defined on
|
| 33 |
+
``BaseDtypeTests``. pytest's fixture discover will supply your ``dtype``
|
| 34 |
+
wherever the test requires it. You're free to implement additional tests.
|
| 35 |
+
|
| 36 |
+
"""
|
| 37 |
+
from pandas.tests.extension.base.accumulate import BaseAccumulateTests
|
| 38 |
+
from pandas.tests.extension.base.casting import BaseCastingTests
|
| 39 |
+
from pandas.tests.extension.base.constructors import BaseConstructorsTests
|
| 40 |
+
from pandas.tests.extension.base.dim2 import ( # noqa: F401
|
| 41 |
+
Dim2CompatTests,
|
| 42 |
+
NDArrayBacked2DTests,
|
| 43 |
+
)
|
| 44 |
+
from pandas.tests.extension.base.dtype import BaseDtypeTests
|
| 45 |
+
from pandas.tests.extension.base.getitem import BaseGetitemTests
|
| 46 |
+
from pandas.tests.extension.base.groupby import BaseGroupbyTests
|
| 47 |
+
from pandas.tests.extension.base.index import BaseIndexTests
|
| 48 |
+
from pandas.tests.extension.base.interface import BaseInterfaceTests
|
| 49 |
+
from pandas.tests.extension.base.io import BaseParsingTests
|
| 50 |
+
from pandas.tests.extension.base.methods import BaseMethodsTests
|
| 51 |
+
from pandas.tests.extension.base.missing import BaseMissingTests
|
| 52 |
+
from pandas.tests.extension.base.ops import ( # noqa: F401
|
| 53 |
+
BaseArithmeticOpsTests,
|
| 54 |
+
BaseComparisonOpsTests,
|
| 55 |
+
BaseOpsUtil,
|
| 56 |
+
BaseUnaryOpsTests,
|
| 57 |
+
)
|
| 58 |
+
from pandas.tests.extension.base.printing import BasePrintingTests
|
| 59 |
+
from pandas.tests.extension.base.reduce import BaseReduceTests
|
| 60 |
+
from pandas.tests.extension.base.reshaping import BaseReshapingTests
|
| 61 |
+
from pandas.tests.extension.base.setitem import BaseSetitemTests
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# One test class that you can inherit as an alternative to inheriting all the
|
| 65 |
+
# test classes above.
|
| 66 |
+
# Note 1) this excludes Dim2CompatTests and NDArrayBacked2DTests.
|
| 67 |
+
# Note 2) this uses BaseReduceTests and and _not_ BaseBooleanReduceTests,
|
| 68 |
+
# BaseNoReduceTests, or BaseNumericReduceTests
|
| 69 |
+
class ExtensionTests(
|
| 70 |
+
BaseAccumulateTests,
|
| 71 |
+
BaseCastingTests,
|
| 72 |
+
BaseConstructorsTests,
|
| 73 |
+
BaseDtypeTests,
|
| 74 |
+
BaseGetitemTests,
|
| 75 |
+
BaseGroupbyTests,
|
| 76 |
+
BaseIndexTests,
|
| 77 |
+
BaseInterfaceTests,
|
| 78 |
+
BaseParsingTests,
|
| 79 |
+
BaseMethodsTests,
|
| 80 |
+
BaseMissingTests,
|
| 81 |
+
BaseArithmeticOpsTests,
|
| 82 |
+
BaseComparisonOpsTests,
|
| 83 |
+
BaseUnaryOpsTests,
|
| 84 |
+
BasePrintingTests,
|
| 85 |
+
BaseReduceTests,
|
| 86 |
+
BaseReshapingTests,
|
| 87 |
+
BaseSetitemTests,
|
| 88 |
+
Dim2CompatTests,
|
| 89 |
+
):
|
| 90 |
+
pass
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def __getattr__(name: str):
    """Module-level attribute hook (PEP 562).

    Serves the three deprecated reduce-test aliases — emitting a
    ``FutureWarning`` and lazily importing the real class from
    ``pandas.tests.extension.base.reduce`` — and raises the standard
    ``AttributeError`` for any other unknown attribute.
    """
    import warnings

    # All three deprecated names live in the same submodule and share an
    # identical deprecation message apart from the class name itself.
    deprecated_reduce_aliases = (
        "BaseNoReduceTests",
        "BaseNumericReduceTests",
        "BaseBooleanReduceTests",
    )

    if name in deprecated_reduce_aliases:
        warnings.warn(
            f"{name} is deprecated and will be removed in a "
            "future version. Use BaseReduceTests and override "
            "`_supports_reduction` instead.",
            FutureWarning,
        )
        # Import lazily so the warning fires only when the alias is used.
        from pandas.tests.extension.base import reduce

        return getattr(reduce, name)

    raise AttributeError(
        f"module 'pandas.tests.extension.base' has no attribute '{name}'"
    )
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (3.8 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc
ADDED
|
Binary file (1.64 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (350 Bytes). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc
ADDED
|
Binary file (3.68 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc
ADDED
|
Binary file (5.75 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc
ADDED
|
Binary file (14.6 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc
ADDED
|
Binary file (5.55 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/io.cpython-310.pyc
ADDED
|
Binary file (1.38 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc
ADDED
|
Binary file (23.6 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc
ADDED
|
Binary file (6.13 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc
ADDED
|
Binary file (8.95 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc
ADDED
|
Binary file (1.85 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc
ADDED
|
Binary file (4.37 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc
ADDED
|
Binary file (15.9 kB). View file
|
|
|