Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/INSTALLER +1 -0
- llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/License.txt +1568 -0
- llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/METADATA +35 -0
- llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/RECORD +36 -0
- llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL +5 -0
- llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/top_level.txt +1 -0
- llmeval-env/lib/python3.10/site-packages/pandas/__init__.py +367 -0
- llmeval-env/lib/python3.10/site-packages/pandas/_typing.py +525 -0
- llmeval-env/lib/python3.10/site-packages/pandas/_version.py +692 -0
- llmeval-env/lib/python3.10/site-packages/pandas/_version_meson.py +2 -0
- llmeval-env/lib/python3.10/site-packages/pandas/conftest.py +1965 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/apply.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/arraylike.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/config_init.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/missing.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/nanops.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/accessor.py +340 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/algorithms.py +1747 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/api.py +140 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/apply.py +2062 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arraylike.py +530 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/base.py +1391 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/common.py +657 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/config_init.py +924 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/construction.py +824 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/flags.py +117 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/frame.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/generic.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/category.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/extension.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/frozen.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/interval.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/period.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/range.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/accessors.py +643 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/api.py +388 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/base.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/category.py +513 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/datetimelike.py +843 -0
llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
+pip
llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/License.txt
ADDED
@@ -0,0 +1,1568 @@
+End User License Agreement
+--------------------------
+
+
+Preface
+-------
+
+The Software License Agreement in Chapter 1 and the Supplement
+in Chapter 2 contain license terms and conditions that govern
+the use of NVIDIA software. By accepting this agreement, you
+agree to comply with all the terms and conditions applicable
+to the product(s) included herein.
+
+
+NVIDIA Driver
+
+
+Description
+
+This package contains the operating system driver and
+fundamental system software components for NVIDIA GPUs.
+
+
+NVIDIA CUDA Toolkit
+
+
+Description
+
+The NVIDIA CUDA Toolkit provides command-line and graphical
+tools for building, debugging and optimizing the performance
+of applications accelerated by NVIDIA GPUs, runtime and math
+libraries, and documentation including programming guides,
+user manuals, and API references.
+
+
+Default Install Location of CUDA Toolkit
+
+Windows platform:
+
+%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#
+
+
+NVIDIA CUDA Samples
+
+
+Description
+
+This package includes over 100+ CUDA examples that demonstrate
+various CUDA programming principles, and efficient CUDA
+implementation of algorithms in specific application domains.
+
+
+Default Install Location of CUDA Samples
+
+Windows platform:
+
+%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#/samples
+
+and
+
+$HOME/NVIDIA_CUDA-#.#_Samples
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#/samples
+
+
+NVIDIA Nsight Visual Studio Edition (Windows only)
+
+
+Description
+
+NVIDIA Nsight Development Platform, Visual Studio Edition is a
+development environment integrated into Microsoft Visual
+Studio that provides tools for debugging, profiling, analyzing
+and optimizing your GPU computing and graphics applications.
+
+
+Default Install Location of Nsight Visual Studio Edition
+
+Windows platform:
+
+%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
+
+
+1. License Agreement for NVIDIA Software Development Kits
+---------------------------------------------------------
+
+
+Release Date: July 26, 2018
+---------------------------
+
+
+Important NoticeRead before downloading, installing,
+copying or using the licensed software:
+-------------------------------------------------------
+
+This license agreement, including exhibits attached
+("Agreement”) is a legal agreement between you and NVIDIA
+Corporation ("NVIDIA") and governs your use of a NVIDIA
+software development kit (“SDK”).
+
+Each SDK has its own set of software and materials, but here
+is a description of the types of items that may be included in
+a SDK: source code, header files, APIs, data sets and assets
+(examples include images, textures, models, scenes, videos,
+native API input/output files), binary software, sample code,
+libraries, utility programs, programming code and
+documentation.
+
+This Agreement can be accepted only by an adult of legal age
+of majority in the country in which the SDK is used.
+
+If you are entering into this Agreement on behalf of a company
+or other legal entity, you represent that you have the legal
+authority to bind the entity to this Agreement, in which case
+“you” will mean the entity you represent.
+
+If you don’t have the required age or authority to accept
+this Agreement, or if you don’t accept all the terms and
+conditions of this Agreement, do not download, install or use
+the SDK.
+
+You agree to use the SDK only for purposes that are permitted
+by (a) this Agreement, and (b) any applicable law, regulation
+or generally accepted practices or guidelines in the relevant
+jurisdictions.
+
+
+1.1. License
+
+
+1.1.1. License Grant
+
+Subject to the terms of this Agreement, NVIDIA hereby grants
+you a non-exclusive, non-transferable license, without the
+right to sublicense (except as expressly provided in this
+Agreement) to:
+
+1. Install and use the SDK,
+
+2. Modify and create derivative works of sample source code
+delivered in the SDK, and
+
+3. Distribute those portions of the SDK that are identified
+in this Agreement as distributable, as incorporated in
+object code format into a software application that meets
+the distribution requirements indicated in this Agreement.
+
+
+1.1.2. Distribution Requirements
+
+These are the distribution requirements for you to exercise
+the distribution grant:
+
+1. Your application must have material additional
+functionality, beyond the included portions of the SDK.
+
+2. The distributable portions of the SDK shall only be
+accessed by your application.
+
+3. The following notice shall be included in modifications
+and derivative works of sample source code distributed:
+“This software contains source code provided by NVIDIA
+Corporation.”
+
+4. Unless a developer tool is identified in this Agreement
+as distributable, it is delivered for your internal use
+only.
+
+5. The terms under which you distribute your application
+must be consistent with the terms of this Agreement,
+including (without limitation) terms relating to the
+license grant and license restrictions and protection of
+NVIDIA’s intellectual property rights. Additionally, you
+agree that you will protect the privacy, security and
+legal rights of your application users.
+
+6. You agree to notify NVIDIA in writing of any known or
+suspected distribution or use of the SDK not in compliance
+with the requirements of this Agreement, and to enforce
+the terms of your agreements with respect to distributed
+SDK.
+
+
+1.1.3. Authorized Users
+
+You may allow employees and contractors of your entity or of
+your subsidiary(ies) to access and use the SDK from your
+secure network to perform work on your behalf.
+
+If you are an academic institution you may allow users
+enrolled or employed by the academic institution to access and
+use the SDK from your secure network.
+
+You are responsible for the compliance with the terms of this
+Agreement by your authorized users. If you become aware that
+your authorized users didn’t follow the terms of this
+Agreement, you agree to take reasonable steps to resolve the
+non-compliance and prevent new occurrences.
+
+
+1.1.4. Pre-Release SDK
+
+The SDK versions identified as alpha, beta, preview or
+otherwise as pre-release, may not be fully functional, may
+contain errors or design flaws, and may have reduced or
+different security, privacy, accessibility, availability, and
+reliability standards relative to commercial versions of
+NVIDIA software and materials. Use of a pre-release SDK may
+result in unexpected results, loss of data, project delays or
+other unpredictable damage or loss.
+
+You may use a pre-release SDK at your own risk, understanding
+that pre-release SDKs are not intended for use in production
+or business-critical systems.
+
+NVIDIA may choose not to make available a commercial version
+of any pre-release SDK. NVIDIA may also choose to abandon
+development and terminate the availability of a pre-release
+SDK at any time without liability.
+
+
+1.1.5. Updates
+
+NVIDIA may, at its option, make available patches, workarounds
+or other updates to this SDK. Unless the updates are provided
+with their separate governing terms, they are deemed part of
+the SDK licensed to you as provided in this Agreement. You
+agree that the form and content of the SDK that NVIDIA
+provides may change without prior notice to you. While NVIDIA
+generally maintains compatibility between versions, NVIDIA may
+in some cases make changes that introduce incompatibilities in
+future versions of the SDK.
+
+
+1.1.6. Third Party Licenses
+
+The SDK may come bundled with, or otherwise include or be
+distributed with, third party software licensed by a NVIDIA
+supplier and/or open source software provided under an open
+source license. Use of third party software is subject to the
+third-party license terms, or in the absence of third party
+terms, the terms of this Agreement. Copyright to third party
+software is held by the copyright holders indicated in the
+third-party software or license.
+
+
+1.1.7. Reservation of Rights
+
+NVIDIA reserves all rights, title, and interest in and to the
+SDK, not expressly granted to you under this Agreement.
+
+
+1.2. Limitations
+
+The following license limitations apply to your use of the
+SDK:
+
+1. You may not reverse engineer, decompile or disassemble,
+or remove copyright or other proprietary notices from any
+portion of the SDK or copies of the SDK.
+
+2. Except as expressly provided in this Agreement, you may
+not copy, sell, rent, sublicense, transfer, distribute,
+modify, or create derivative works of any portion of the
+SDK. For clarity, you may not distribute or sublicense the
+SDK as a stand-alone product.
+
+3. Unless you have an agreement with NVIDIA for this
+purpose, you may not indicate that an application created
+with the SDK is sponsored or endorsed by NVIDIA.
+
+4. You may not bypass, disable, or circumvent any
+encryption, security, digital rights management or
+authentication mechanism in the SDK.
+
+5. You may not use the SDK in any manner that would cause it
+to become subject to an open source software license. As
+examples, licenses that require as a condition of use,
+modification, and/or distribution that the SDK be:
+
+a. Disclosed or distributed in source code form;
+
+b. Licensed for the purpose of making derivative works;
+or
+
+c. Redistributable at no charge.
+
+6. Unless you have an agreement with NVIDIA for this
+purpose, you may not use the SDK with any system or
+application where the use or failure of the system or
+application can reasonably be expected to threaten or
+result in personal injury, death, or catastrophic loss.
+Examples include use in avionics, navigation, military,
+medical, life support or other life critical applications.
+NVIDIA does not design, test or manufacture the SDK for
+these critical uses and NVIDIA shall not be liable to you
+or any third party, in whole or in part, for any claims or
+damages arising from such uses.
+
+7. You agree to defend, indemnify and hold harmless NVIDIA
+and its affiliates, and their respective employees,
+contractors, agents, officers and directors, from and
+against any and all claims, damages, obligations, losses,
+liabilities, costs or debt, fines, restitutions and
+expenses (including but not limited to attorney’s fees
+and costs incident to establishing the right of
+indemnification) arising out of or related to your use of
+the SDK outside of the scope of this Agreement, or not in
+compliance with its terms.
+
+
+1.3. Ownership
+
+1. NVIDIA or its licensors hold all rights, title and
+interest in and to the SDK and its modifications and
+derivative works, including their respective intellectual
+property rights, subject to your rights described in this
+section. This SDK may include software and materials from
+NVIDIA’s licensors, and these licensors are intended
+third party beneficiaries that may enforce this Agreement
+with respect to their intellectual property rights.
+
+2. You hold all rights, title and interest in and to your
+applications and your derivative works of the sample
+source code delivered in the SDK, including their
+respective intellectual property rights, subject to
+NVIDIA’s rights described in this section.
+
+3. You may, but don’t have to, provide to NVIDIA
+suggestions, feature requests or other feedback regarding
+the SDK, including possible enhancements or modifications
+to the SDK. For any feedback that you voluntarily provide,
+you hereby grant NVIDIA and its affiliates a perpetual,
+non-exclusive, worldwide, irrevocable license to use,
+reproduce, modify, license, sublicense (through multiple
+tiers of sublicensees), and distribute (through multiple
+tiers of distributors) it without the payment of any
+royalties or fees to you. NVIDIA will use feedback at its
+choice. NVIDIA is constantly looking for ways to improve
+its products, so you may send feedback to NVIDIA through
+the developer portal at https://developer.nvidia.com.
+
+
+1.4. No Warranties
+
+THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
+FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
+ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
+OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
+BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
+ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
+WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
+DEALING OR COURSE OF TRADE.
+
+
+1.5. Limitation of Liability
+
+TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
+AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
+PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
+OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
+PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
+WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
+WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
+OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
+PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
+LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
+TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
+AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
+NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
+LIMIT.
+
+These exclusions and limitations of liability shall apply
+regardless if NVIDIA or its affiliates have been advised of
+the possibility of such damages, and regardless of whether a
+remedy fails its essential purpose. These exclusions and
+limitations of liability form an essential basis of the
+bargain between the parties, and, absent any of these
+exclusions or limitations of liability, the provisions of this
+Agreement, including, without limitation, the economic terms,
+would be substantially different.
+
+
+1.6. Termination
+
+1. This Agreement will continue to apply until terminated by
+either you or NVIDIA as described below.
+
+2. If you want to terminate this Agreement, you may do so by
+stopping to use the SDK.
+
+3. NVIDIA may, at any time, terminate this Agreement if:
+
+a. (i) you fail to comply with any term of this
+Agreement and the non-compliance is not fixed within
+thirty (30) days following notice from NVIDIA (or
+immediately if you violate NVIDIA’s intellectual
+property rights);
+
+b. (ii) you commence or participate in any legal
+proceeding against NVIDIA with respect to the SDK; or
+
+c. (iii) NVIDIA decides to no longer provide the SDK in
+a country or, in NVIDIA’s sole discretion, the
+continued use of it is no longer commercially viable.
+
+4. Upon any termination of this Agreement, you agree to
+promptly discontinue use of the SDK and destroy all copies
+in your possession or control. Your prior distributions in
+accordance with this Agreement are not affected by the
+termination of this Agreement. Upon written request, you
+will certify in writing that you have complied with your
+commitments under this section. Upon any termination of
+this Agreement all provisions survive except for the
+license grant provisions.
+
+
+1.7. General
+
+If you wish to assign this Agreement or your rights and
+obligations, including by merger, consolidation, dissolution
+or operation of law, contact NVIDIA to ask for permission. Any
+attempted assignment not approved by NVIDIA in writing shall
+be void and of no effect. NVIDIA may assign, delegate or
+transfer this Agreement and its rights and obligations, and if
+to a non-affiliate you will be notified.
+
+You agree to cooperate with NVIDIA and provide reasonably
+requested information to verify your compliance with this
+Agreement.
+
+This Agreement will be governed in all respects by the laws of
+the United States and of the State of Delaware as those laws
+are applied to contracts entered into and performed entirely
+within Delaware by Delaware residents, without regard to the
+conflicts of laws principles. The United Nations Convention on
+Contracts for the International Sale of Goods is specifically
+disclaimed. You agree to all terms of this Agreement in the
+English language.
+
+The state or federal courts residing in Santa Clara County,
+California shall have exclusive jurisdiction over any dispute
+or claim arising out of this Agreement. Notwithstanding this,
+you agree that NVIDIA shall still be allowed to apply for
+injunctive remedies or an equivalent type of urgent legal
+relief in any jurisdiction.
+
+If any court of competent jurisdiction determines that any
+provision of this Agreement is illegal, invalid or
+unenforceable, such provision will be construed as limited to
+the extent necessary to be consistent with and fully
+enforceable under the law and the remaining provisions will
+remain in full force and effect. Unless otherwise specified,
+remedies are cumulative.
+
+Each party acknowledges and agrees that the other is an
+independent contractor in the performance of this Agreement.
+
+The SDK has been developed entirely at private expense and is
+“commercial items” consisting of “commercial computer
+software” and “commercial computer software
+documentation” provided with RESTRICTED RIGHTS. Use,
+duplication or disclosure by the U.S. Government or a U.S.
+Government subcontractor is subject to the restrictions in
+this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
+in subparagraphs (c)(1) and (2) of the Commercial Computer
+Software - Restricted Rights clause at FAR 52.227-19, as
+applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
+Expressway, Santa Clara, CA 95051.
+
+The SDK is subject to United States export laws and
+regulations. You agree that you will not ship, transfer or
+export the SDK into any country, or use the SDK in any manner,
+prohibited by the United States Bureau of Industry and
+Security or economic sanctions regulations administered by the
+U.S. Department of Treasury’s Office of Foreign Assets
+Control (OFAC), or any applicable export laws, restrictions or
+regulations. These laws include restrictions on destinations,
+end users and end use. By accepting this Agreement, you
+confirm that you are not a resident or citizen of any country
+currently embargoed by the U.S. and that you are not otherwise
+prohibited from receiving the SDK.
+
+Any notice delivered by NVIDIA to you under this Agreement
+will be delivered via mail, email or fax. You agree that any
+notices that NVIDIA sends you electronically will satisfy any
+legal communication requirements. Please direct your legal
+notices or other correspondence to NVIDIA Corporation, 2788
+San Tomas Expressway, Santa Clara, California 95051, United
+States of America, Attention: Legal Department.
+
+This Agreement and any exhibits incorporated into this
+Agreement constitute the entire agreement of the parties with
+respect to the subject matter of this Agreement and supersede
+all prior negotiations or documentation exchanged between the
+parties relating to this SDK license. Any additional and/or
+conflicting terms on documents issued by you are null, void,
+and invalid. Any amendment or waiver under this Agreement
+shall be in writing and signed by representatives of both
+parties.
+
+
+2. CUDA Toolkit Supplement to Software License Agreement for
+NVIDIA Software Development Kits
+------------------------------------------------------------
+
+
+Release date: August 16, 2018
+-----------------------------
+
+The terms in this supplement govern your use of the NVIDIA
+CUDA Toolkit SDK under the terms of your license agreement
+(“Agreement”) as modified by this supplement. Capitalized
+terms used but not defined below have the meaning assigned to
+them in the Agreement.
+
+This supplement is an exhibit to the Agreement and is
+incorporated as an integral part of the Agreement. In the
+event of conflict between the terms in this supplement and the
+terms in the Agreement, the terms in this supplement govern.
+
+
+2.1. License Scope
+
+The SDK is licensed for you to develop applications only for
+use in systems with NVIDIA GPUs.
+
+
+2.2. Distribution
+
+The portions of the SDK that are distributable under the
+Agreement are listed in Attachment A.
+
+
+2.3. Operating Systems
+
+Those portions of the SDK designed exclusively for use on the
+Linux or FreeBSD operating systems, or other operating systems
+derived from the source code to these operating systems, may
+be copied and redistributed for use in accordance with this
+Agreement, provided that the object code files are not
+modified in any way (except for unzipping of compressed
+files).
+
+
+2.4. Audio and Video Encoders and Decoders
+
+You acknowledge and agree that it is your sole responsibility
+to obtain any additional third-party licenses required to
+make, have made, use, have used, sell, import, and offer for
+sale your products or services that include or incorporate any
+third-party software and content relating to audio and/or
+video encoders and decoders from, including but not limited
+to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
+MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
+under this Agreement any necessary patent or other rights with
+respect to any audio and/or video encoders and decoders.
+
+
+2.5. Licensing
+
+If the distribution terms in this Agreement are not suitable
+for your organization, or for any questions regarding this
+Agreement, please contact NVIDIA at
+
+
+
+2.6. Attachment A
+
+The following portions of the SDK are distributable under the
+Agreement:
+
+Component
+
+CUDA Runtime
+
+Windows
+
+cudart.dll, cudart_static.lib, cudadevrt.lib
+
+Mac OSX
+
+libcudart.dylib, libcudart_static.a, libcudadevrt.a
+
+Linux
+
+libcudart.so, libcudart_static.a, libcudadevrt.a
+
+Android
+
+libcudart.so, libcudart_static.a, libcudadevrt.a
+
+Component
+
+CUDA FFT Library
+
+Windows
+
+cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
+
+Mac OSX
+
+libcufft.dylib, libcufft_static.a, libcufftw.dylib,
+libcufftw_static.a
+
+Linux
+
+libcufft.so, libcufft_static.a, libcufftw.so,
+libcufftw_static.a
+
+Android
+
+libcufft.so, libcufft_static.a, libcufftw.so,
+libcufftw_static.a
+
+Component
+
+CUDA BLAS Library
+
+Windows
+
+cublas.dll, cublasLt.dll
+
+Mac OSX
+
+libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
+libcublasLt_static.a
+
+Linux
+
+libcublas.so, libcublasLt.so, libcublas_static.a,
+libcublasLt_static.a
+
+Android
+
+libcublas.so, libcublasLt.so, libcublas_static.a,
+libcublasLt_static.a
+
+Component
+
+NVIDIA "Drop-in" BLAS Library
+
+Windows
+
+nvblas.dll
+
+Mac OSX
+
+libnvblas.dylib
+
+Linux
+
+libnvblas.so
+
+Component
+
+CUDA Sparse Matrix Library
+
+Windows
+
+cusparse.dll, cusparse.lib
+
+Mac OSX
+
+libcusparse.dylib, libcusparse_static.a
+
+Linux
+
+libcusparse.so, libcusparse_static.a
+
+Android
+
+libcusparse.so, libcusparse_static.a
+
+Component
+
+CUDA Linear Solver Library
+
+Windows
+
+cusolver.dll, cusolver.lib
+
+Mac OSX
+
+libcusolver.dylib, libcusolver_static.a
+
+Linux
+
+libcusolver.so, libcusolver_static.a
+
+Android
+
+libcusolver.so, libcusolver_static.a
+
+Component
+
+CUDA Random Number Generation Library
+
+Windows
+
+curand.dll, curand.lib
+
+Mac OSX
+
+libcurand.dylib, libcurand_static.a
+
+Linux
+
+libcurand.so, libcurand_static.a
+
+Android
+
+libcurand.so, libcurand_static.a
+
+Component
+
+CUDA Accelerated Graph Library
+
+Component
+
+NVIDIA Performance Primitives Library
+
+Windows
+
+nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+
+Mac OSX
+
+libnppc.dylib, libnppc_static.a, libnppial.dylib,
+libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+libnppig.dylib, libnppig_static.a, libnppim.dylib,
+libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
+libnpps.dylib, libnpps_static.a
+
+Linux
+
+libnppc.so, libnppc_static.a, libnppial.so,
+libnppial_static.a, libnppicc.so, libnppicc_static.a,
+libnppicom.so, libnppicom_static.a, libnppidei.so,
+libnppidei_static.a, libnppif.so, libnppif_static.a
+libnppig.so, libnppig_static.a, libnppim.so,
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Android
+
+libnppc.so, libnppc_static.a, libnppial.so,
+libnppial_static.a, libnppicc.so, libnppicc_static.a,
+libnppicom.so, libnppicom_static.a, libnppidei.so,
+libnppidei_static.a, libnppif.so, libnppif_static.a
+libnppig.so, libnppig_static.a, libnppim.so,
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Component
+
+NVIDIA JPEG Library
+
+Linux
+
+libnvjpeg.so, libnvjpeg_static.a
+
+Component
+
+Internal common library required for statically linking to
+cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+
+Mac OSX
+
+libculibos.a
+
+Linux
+
+libculibos.a
+
+Component
+
+NVIDIA Runtime Compilation Library and Header
+
+All
+
+nvrtc.h
+
+Windows
+
+nvrtc.dll, nvrtc-builtins.dll
+
+Mac OSX
+
+libnvrtc.dylib, libnvrtc-builtins.dylib
+
+Linux
+
+libnvrtc.so, libnvrtc-builtins.so
+
+Component
+
+NVIDIA Optimizing Compiler Library
+
+Windows
+
+nvvm.dll
+
+Mac OSX
+
+libnvvm.dylib
+
+Linux
+
+libnvvm.so
+
+Component
+
+NVIDIA Common Device Math Functions Library
+
+Windows
+
+libdevice.10.bc
+
+Mac OSX
+
+libdevice.10.bc
+
+Linux
+
+libdevice.10.bc
+
+Component
+
+CUDA Occupancy Calculation Header Library
+
+All
+
+cuda_occupancy.h
+
+Component
+
+CUDA Half Precision Headers
+
+All
+
+cuda_fp16.h, cuda_fp16.hpp
+
+Component
+
+CUDA Profiling Tools Interface (CUPTI) Library
+
+Windows
+
+cupti.dll
+
+Mac OSX
+
+libcupti.dylib
+
+Linux
+
+libcupti.so
+
+Component
+
+NVIDIA Tools Extension Library
+
+Windows
+
+nvToolsExt.dll, nvToolsExt.lib
+
+Mac OSX
+
+libnvToolsExt.dylib
+
+Linux
+
+libnvToolsExt.so
+
+Component
+
+NVIDIA CUDA Driver Libraries
+
+Linux
+
+libcuda.so, libnvidia-fatbinaryloader.so,
+libnvidia-ptxjitcompiler.so
+
+The NVIDIA CUDA Driver Libraries are only distributable in
+applications that meet this criteria:
+
+1. The application was developed starting from a NVIDIA CUDA
+container obtained from Docker Hub or the NVIDIA GPU
+Cloud, and
+
+2. The resulting application is packaged as a Docker
+container and distributed to users on Docker Hub or the
+NVIDIA GPU Cloud only.
+
+
+2.7. Attachment B
+
+
+Additional Licensing Obligations
+
+The following third party components included in the SOFTWARE
+are licensed to Licensee pursuant to the following terms and
+conditions:
+
+1. Licensee's use of the GDB third party component is
+subject to the terms and conditions of GNU GPL v3:
+
+This product includes copyrighted third-party software licensed
+under the terms of the GNU General Public License v3 ("GPL v3").
+All third-party software packages are copyright by their respective
+authors. GPL v3 terms and conditions are hereby incorporated into
+the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
+
+Consistent with these licensing requirements, the software
+listed below is provided under the terms of the specified
+open source software licenses. To obtain source code for
+software provided under licenses that require
+redistribution of source code, including the GNU General
+Public License (GPL) and GNU Lesser General Public License
+(LGPL), contact [email protected]. This offer is
+valid for a period of three (3) years from the date of the
+distribution of this product by NVIDIA CORPORATION.
+
+Component License
+CUDA-GDB GPL v3
+
+2. Licensee represents and warrants that any and all third
+party licensing and/or royalty payment obligations in
+connection with Licensee's use of the H.264 video codecs
+are solely the responsibility of Licensee.
+
+3. Licensee's use of the Thrust library is subject to the
+terms and conditions of the Apache License Version 2.0.
+All third-party software packages are copyright by their
+respective authors. Apache License Version 2.0 terms and
+conditions are hereby incorporated into the Agreement by
+this reference.
+http://www.apache.org/licenses/LICENSE-2.0.html
+
+In addition, Licensee acknowledges the following notice:
+Thrust includes source code from the Boost Iterator,
+Tuple, System, and Random Number libraries.
+
+Boost Software License - Version 1.0 - August 17th, 2003
+. . . .
+
+Permission is hereby granted, free of charge, to any person or
+organization obtaining a copy of the software and accompanying
+documentation covered by this license (the "Software") to use,
+reproduce, display, distribute, execute, and transmit the Software,
+and to prepare derivative works of the Software, and to permit
+third-parties to whom the Software is furnished to do so, all
+subject to the following:
+
+The copyright notices in the Software and this entire statement,
+including the above license grant, this restriction and the following
+disclaimer, must be included in all copies of the Software, in whole
+or in part, and all derivative works of the Software, unless such
+copies or derivative works are solely in the form of machine-executable
+object code generated by a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+4. Licensee's use of the LLVM third party component is
+subject to the following terms and conditions:
+
+======================================================
+LLVM Release License
+======================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+LLVM Team
+
+University of Illinois at Urbana-Champaign
+
+http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal with the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+* Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimers.
+
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimers in the
+documentation and/or other materials provided with the distribution.
+
+* Neither the names of the LLVM Team, University of Illinois at Urbana-
+Champaign, nor the names of its contributors may be used to endorse or
+promote products derived from this Software without specific prior
+written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS WITH THE SOFTWARE.
+
+5. Licensee's use (e.g. nvprof) of the PCRE third party
+component is subject to the following terms and
+conditions:
+
+------------
+PCRE LICENCE
+------------
+PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+specified below. The documentation for PCRE, supplied in the "doc"
+directory, is distributed under the same terms as the software itself. The
+basic library functions are written in C and are freestanding. Also
+included in the distribution is a set of C++ wrapper functions, and a just-
+in-time compiler that can be used to optimize pattern matching. These are
+both optional features that can be omitted when the library is built.
+
+THE BASIC LIBRARY FUNCTIONS
+---------------------------
+Written by: Philip Hazel
+Email local part: ph10
+Email domain: cam.ac.uk
+University of Cambridge Computing Service,
+Cambridge, England.
+Copyright (c) 1997-2012 University of Cambridge
+All rights reserved.
+
+PCRE JUST-IN-TIME COMPILATION SUPPORT
+-------------------------------------
+Written by: Zoltan Herczeg
+Email local part: hzmester
+Emain domain: freemail.hu
+Copyright(c) 2010-2012 Zoltan Herczeg
+All rights reserved.
+
+STACK-LESS JUST-IN-TIME COMPILER
+--------------------------------
+Written by: Zoltan Herczeg
+Email local part: hzmester
+Emain domain: freemail.hu
+Copyright(c) 2009-2012 Zoltan Herczeg
+All rights reserved.
+
+THE C++ WRAPPER FUNCTIONS
+-------------------------
+Contributed by: Google Inc.
+Copyright (c) 2007-2012, Google Inc.
+All rights reserved.
+
+THE "BSD" LICENCE
+-----------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+* Neither the name of the University of Cambridge nor the name of Google
+Inc. nor the names of their contributors may be used to endorse or
+promote products derived from this software without specific prior
+written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+6. Some of the cuBLAS library routines were written by or
+derived from code written by Vasily Volkov and are subject
+to the Modified Berkeley Software Distribution License as
+follows:
+
+Copyright (c) 2007-2009, Regents of the University of California
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following
+disclaimer in the documentation and/or other materials provided
+with the distribution.
+* Neither the name of the University of California, Berkeley nor
+the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior
+written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+7. Some of the cuBLAS library routines were written by or
+derived from code written by Davide Barbieri and are
+subject to the Modified Berkeley Software Distribution
+License as follows:
+
+Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following
+disclaimer in the documentation and/or other materials provided
+with the distribution.
+* The name of the author may not be used to endorse or promote
+products derived from this software without specific prior
+written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+8. Some of the cuBLAS library routines were derived from
+code developed by the University of Tennessee and are
+subject to the Modified Berkeley Software Distribution
+License as follows:
+
+Copyright (c) 2010 The University of Tennessee.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
|
1199 |
+
modification, are permitted provided that the following conditions are
|
1200 |
+
met:
|
1201 |
+
* Redistributions of source code must retain the above copyright
|
1202 |
+
notice, this list of conditions and the following disclaimer.
|
1203 |
+
* Redistributions in binary form must reproduce the above
|
1204 |
+
copyright notice, this list of conditions and the following
|
1205 |
+
disclaimer listed in this license in the documentation and/or
|
1206 |
+
other materials provided with the distribution.
|
1207 |
+
* Neither the name of the copyright holders nor the names of its
|
1208 |
+
contributors may be used to endorse or promote products derived
|
1209 |
+
from this software without specific prior written permission.
|
1210 |
+
|
1211 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1212 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1213 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1214 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1215 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1216 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1217 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1218 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1219 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1220 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1221 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1222 |
+
|
1223 |
+
9. Some of the cuBLAS library routines were written by or
|
1224 |
+
derived from code written by Jonathan Hogg and are subject
|
1225 |
+
to the Modified Berkeley Software Distribution License as
|
1226 |
+
follows:
|
1227 |
+
|
1228 |
+
Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
|
1229 |
+
|
1230 |
+
All rights reserved.
|
1231 |
+
|
1232 |
+
Redistribution and use in source and binary forms, with or without
|
1233 |
+
modification, are permitted provided that the following conditions are
|
1234 |
+
met:
|
1235 |
+
* Redistributions of source code must retain the above copyright
|
1236 |
+
notice, this list of conditions and the following disclaimer.
|
1237 |
+
* Redistributions in binary form must reproduce the above
|
1238 |
+
copyright notice, this list of conditions and the following
|
1239 |
+
disclaimer in the documentation and/or other materials provided
|
1240 |
+
with the distribution.
|
1241 |
+
* Neither the name of the STFC nor the names of its contributors
|
1242 |
+
may be used to endorse or promote products derived from this
|
1243 |
+
software without specific prior written permission.
|
1244 |
+
|
1245 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1246 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1247 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1248 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
|
1249 |
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
1250 |
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
1251 |
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
1252 |
+
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
1253 |
+
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
1254 |
+
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
1255 |
+
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1256 |
+
|
1257 |
+
10. Some of the cuBLAS library routines were written by or
|
1258 |
+
derived from code written by Ahmad M. Abdelfattah, David
|
1259 |
+
Keyes, and Hatem Ltaief, and are subject to the Apache
|
1260 |
+
License, Version 2.0, as follows:
|
1261 |
+
|
1262 |
+
-- (C) Copyright 2013 King Abdullah University of Science and Technology
|
1263 |
+
Authors:
|
1264 |
+
Ahmad Abdelfattah ([email protected])
|
1265 |
+
David Keyes ([email protected])
|
1266 |
+
Hatem Ltaief ([email protected])
|
1267 |
+
|
1268 |
+
Redistribution and use in source and binary forms, with or without
|
1269 |
+
modification, are permitted provided that the following conditions
|
1270 |
+
are met:
|
1271 |
+
|
1272 |
+
* Redistributions of source code must retain the above copyright
|
1273 |
+
notice, this list of conditions and the following disclaimer.
|
1274 |
+
* Redistributions in binary form must reproduce the above copyright
|
1275 |
+
notice, this list of conditions and the following disclaimer in the
|
1276 |
+
documentation and/or other materials provided with the distribution.
|
1277 |
+
* Neither the name of the King Abdullah University of Science and
|
1278 |
+
Technology nor the names of its contributors may be used to endorse
|
1279 |
+
or promote products derived from this software without specific prior
|
1280 |
+
written permission.
|
1281 |
+
|
1282 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1283 |
+
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1284 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1285 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1286 |
+
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1287 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1288 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1289 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1290 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1291 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1292 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
|
1293 |
+
|
1294 |
+
11. Some of the cuSPARSE library routines were written by or
|
1295 |
+
derived from code written by Li-Wen Chang and are subject
|
1296 |
+
to the NCSA Open Source License as follows:
|
1297 |
+
|
1298 |
+
Copyright (c) 2012, University of Illinois.
|
1299 |
+
|
1300 |
+
All rights reserved.
|
1301 |
+
|
1302 |
+
Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
|
1303 |
+
|
1304 |
+
Permission is hereby granted, free of charge, to any person obtaining
|
1305 |
+
a copy of this software and associated documentation files (the
|
1306 |
+
"Software"), to deal with the Software without restriction, including
|
1307 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
1308 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
1309 |
+
permit persons to whom the Software is furnished to do so, subject to
|
1310 |
+
the following conditions:
|
1311 |
+
* Redistributions of source code must retain the above copyright
|
1312 |
+
notice, this list of conditions and the following disclaimer.
|
1313 |
+
* Redistributions in binary form must reproduce the above
|
1314 |
+
copyright notice, this list of conditions and the following
|
1315 |
+
disclaimers in the documentation and/or other materials provided
|
1316 |
+
with the distribution.
|
1317 |
+
* Neither the names of IMPACT Group, University of Illinois, nor
|
1318 |
+
the names of its contributors may be used to endorse or promote
|
1319 |
+
products derived from this Software without specific prior
|
1320 |
+
written permission.
|
1321 |
+
|
1322 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
1323 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
1324 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
1325 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
|
1326 |
+
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
1327 |
+
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
1328 |
+
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
|
1329 |
+
SOFTWARE.
|
1330 |
+
|
1331 |
+
12. Some of the cuRAND library routines were written by or
|
1332 |
+
derived from code written by Mutsuo Saito and Makoto
|
1333 |
+
Matsumoto and are subject to the following license:
|
1334 |
+
|
1335 |
+
Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
|
1336 |
+
University. All rights reserved.
|
1337 |
+
|
1338 |
+
Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
|
1339 |
+
University and University of Tokyo. All rights reserved.
|
1340 |
+
|
1341 |
+
Redistribution and use in source and binary forms, with or without
|
1342 |
+
modification, are permitted provided that the following conditions are
|
1343 |
+
met:
|
1344 |
+
* Redistributions of source code must retain the above copyright
|
1345 |
+
notice, this list of conditions and the following disclaimer.
|
1346 |
+
* Redistributions in binary form must reproduce the above
|
1347 |
+
copyright notice, this list of conditions and the following
|
1348 |
+
disclaimer in the documentation and/or other materials provided
|
1349 |
+
with the distribution.
|
1350 |
+
* Neither the name of the Hiroshima University nor the names of
|
1351 |
+
its contributors may be used to endorse or promote products
|
1352 |
+
derived from this software without specific prior written
|
1353 |
+
permission.
|
1354 |
+
|
1355 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1356 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1357 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1358 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1359 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1360 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1361 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1362 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1363 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1364 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1365 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1366 |
+
|
1367 |
+
13. Some of the cuRAND library routines were derived from
|
1368 |
+
code developed by D. E. Shaw Research and are subject to
|
1369 |
+
the following license:
|
1370 |
+
|
1371 |
+
Copyright 2010-2011, D. E. Shaw Research.
|
1372 |
+
|
1373 |
+
All rights reserved.
|
1374 |
+
|
1375 |
+
Redistribution and use in source and binary forms, with or without
|
1376 |
+
modification, are permitted provided that the following conditions are
|
1377 |
+
met:
|
1378 |
+
* Redistributions of source code must retain the above copyright
|
1379 |
+
notice, this list of conditions, and the following disclaimer.
|
1380 |
+
* Redistributions in binary form must reproduce the above
|
1381 |
+
copyright notice, this list of conditions, and the following
|
1382 |
+
disclaimer in the documentation and/or other materials provided
|
1383 |
+
with the distribution.
|
1384 |
+
* Neither the name of D. E. Shaw Research nor the names of its
|
1385 |
+
contributors may be used to endorse or promote products derived
|
1386 |
+
from this software without specific prior written permission.
|
1387 |
+
|
1388 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1389 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1390 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1391 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1392 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1393 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1394 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1395 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1396 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1397 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1398 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1399 |
+
|
1400 |
+
14. Some of the Math library routines were written by or
|
1401 |
+
derived from code developed by Norbert Juffa and are
|
1402 |
+
subject to the following license:
|
1403 |
+
|
1404 |
+
Copyright (c) 2015-2017, Norbert Juffa
|
1405 |
+
All rights reserved.
|
1406 |
+
|
1407 |
+
Redistribution and use in source and binary forms, with or without
|
1408 |
+
modification, are permitted provided that the following conditions
|
1409 |
+
are met:
|
1410 |
+
|
1411 |
+
1. Redistributions of source code must retain the above copyright
|
1412 |
+
notice, this list of conditions and the following disclaimer.
|
1413 |
+
|
1414 |
+
2. Redistributions in binary form must reproduce the above copyright
|
1415 |
+
notice, this list of conditions and the following disclaimer in the
|
1416 |
+
documentation and/or other materials provided with the distribution.
|
1417 |
+
|
1418 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1419 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1420 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1421 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1422 |
+
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1423 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1424 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1425 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1426 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1427 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1428 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1429 |
+
|
1430 |
+
15. Licensee's use of the lz4 third party component is
|
1431 |
+
subject to the following terms and conditions:
|
1432 |
+
|
1433 |
+
Copyright (C) 2011-2013, Yann Collet.
|
1434 |
+
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
1435 |
+
|
1436 |
+
Redistribution and use in source and binary forms, with or without
|
1437 |
+
modification, are permitted provided that the following conditions are
|
1438 |
+
met:
|
1439 |
+
|
1440 |
+
* Redistributions of source code must retain the above copyright
|
1441 |
+
notice, this list of conditions and the following disclaimer.
|
1442 |
+
* Redistributions in binary form must reproduce the above
|
1443 |
+
copyright notice, this list of conditions and the following disclaimer
|
1444 |
+
in the documentation and/or other materials provided with the
|
1445 |
+
distribution.
|
1446 |
+
|
1447 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1448 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1449 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1450 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1451 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1452 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1453 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1454 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1455 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1456 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1457 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1458 |
+
|
1459 |
+
16. The NPP library uses code from the Boost Math Toolkit,
|
1460 |
+
and is subject to the following license:
|
1461 |
+
|
1462 |
+
Boost Software License - Version 1.0 - August 17th, 2003
|
1463 |
+
. . . .
|
1464 |
+
|
1465 |
+
Permission is hereby granted, free of charge, to any person or
|
1466 |
+
organization obtaining a copy of the software and accompanying
|
1467 |
+
documentation covered by this license (the "Software") to use,
|
1468 |
+
reproduce, display, distribute, execute, and transmit the Software,
|
1469 |
+
and to prepare derivative works of the Software, and to permit
|
1470 |
+
third-parties to whom the Software is furnished to do so, all
|
1471 |
+
subject to the following:
|
1472 |
+
|
1473 |
+
The copyright notices in the Software and this entire statement,
|
1474 |
+
including the above license grant, this restriction and the following
|
1475 |
+
disclaimer, must be included in all copies of the Software, in whole
|
1476 |
+
or in part, and all derivative works of the Software, unless such
|
1477 |
+
copies or derivative works are solely in the form of machine-executable
|
1478 |
+
object code generated by a source language processor.
|
1479 |
+
|
1480 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
1481 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
1482 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
|
1483 |
+
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
|
1484 |
+
ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
|
1485 |
+
OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
|
1486 |
+
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
1487 |
+
OTHER DEALINGS IN THE SOFTWARE.
|
1488 |
+
|
1489 |
+
17. Portions of the Nsight Eclipse Edition is subject to the
|
1490 |
+
following license:
|
1491 |
+
|
1492 |
+
The Eclipse Foundation makes available all content in this plug-in
|
1493 |
+
("Content"). Unless otherwise indicated below, the Content is provided
|
1494 |
+
to you under the terms and conditions of the Eclipse Public License
|
1495 |
+
Version 1.0 ("EPL"). A copy of the EPL is available at http://
|
1496 |
+
www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
|
1497 |
+
will mean the Content.
|
1498 |
+
|
1499 |
+
If you did not receive this Content directly from the Eclipse
|
1500 |
+
Foundation, the Content is being redistributed by another party
|
1501 |
+
("Redistributor") and different terms and conditions may apply to your
|
1502 |
+
use of any object code in the Content. Check the Redistributor's
|
1503 |
+
license that was provided with the Content. If no such license exists,
|
1504 |
+
contact the Redistributor. Unless otherwise indicated below, the terms
|
1505 |
+
and conditions of the EPL still apply to any source code in the
|
1506 |
+
Content and such source code may be obtained at http://www.eclipse.org.
|
1507 |
+
|
1508 |
+
18. Some of the cuBLAS library routines uses code from
|
1509 |
+
OpenAI, which is subject to the following license:
|
1510 |
+
|
1511 |
+
License URL
|
1512 |
+
https://github.com/openai/openai-gemm/blob/master/LICENSE
|
1513 |
+
|
1514 |
+
License Text
|
1515 |
+
The MIT License
|
1516 |
+
|
1517 |
+
Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
|
1518 |
+
|
1519 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
1520 |
+
of this software and associated documentation files (the "Software"), to deal
|
1521 |
+
in the Software without restriction, including without limitation the rights
|
1522 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
1523 |
+
copies of the Software, and to permit persons to whom the Software is
|
1524 |
+
furnished to do so, subject to the following conditions:
|
1525 |
+
|
1526 |
+
The above copyright notice and this permission notice shall be included in
|
1527 |
+
all copies or substantial portions of the Software.
|
1528 |
+
|
1529 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
1530 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
1531 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
1532 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
1533 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
1534 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
1535 |
+
THE SOFTWARE.
|
1536 |
+
|
1537 |
+
19. Licensee's use of the Visual Studio Setup Configuration
|
1538 |
+
Samples is subject to the following license:
|
1539 |
+
|
1540 |
+
The MIT License (MIT)
|
1541 |
+
Copyright (C) Microsoft Corporation. All rights reserved.
|
1542 |
+
|
1543 |
+
Permission is hereby granted, free of charge, to any person
|
1544 |
+
obtaining a copy of this software and associated documentation
|
1545 |
+
files (the "Software"), to deal in the Software without restriction,
|
1546 |
+
including without limitation the rights to use, copy, modify, merge,
|
1547 |
+
publish, distribute, sublicense, and/or sell copies of the Software,
|
1548 |
+
and to permit persons to whom the Software is furnished to do so,
|
1549 |
+
subject to the following conditions:
|
1550 |
+
|
1551 |
+
The above copyright notice and this permission notice shall be included
|
1552 |
+
in all copies or substantial portions of the Software.
|
1553 |
+
|
1554 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
1555 |
+
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
1556 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
1557 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
1558 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
1559 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
1560 |
+
|
1561 |
+
20. Licensee's use of linmath.h header for CPU functions for
|
1562 |
+
GL vector/matrix operations from lunarG is subject to the
|
1563 |
+
Apache License Version 2.0.
|
1564 |
+
|
1565 |
+
21. The DX12-CUDA sample uses the d3dx12.h header, which is
|
1566 |
+
subject to the MIT license .
|
1567 |
+
|
1568 |
+
-----------------
|
llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/METADATA
ADDED
@@ -0,0 +1,35 @@
+Metadata-Version: 2.1
+Name: nvidia-nvtx-cu12
+Version: 12.1.105
+Summary: NVIDIA Tools Extension
+Home-page: https://developer.nvidia.com/cuda-zone
+Author: Nvidia CUDA Installer Team
+Author-email: [email protected]
+License: NVIDIA Proprietary Software
+Keywords: cuda,nvidia,runtime,machine learning,deep learning
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: Other/Proprietary License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3
+License-File: License.txt
+
+A C-based API for annotating events, code ranges, and resources in your applications. Applications which integrate NVTX can use the Visual Profiler to capture and visualize these events and ranges.
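
Editorial note: the wheel above ships only the NVTX headers and libnvToolsExt.so.1, with no Python bindings, so the C API described in the summary is reached from Python via ctypes. A minimal sketch, assuming a Linux environment where the bundled libnvToolsExt.so.1 is on the loader path; the functions nvtxRangePushA and nvtxRangePop are part of the public NVTX C API declared in the bundled nvToolsExt.h.

    import ctypes

    # Load the NVTX shared library shipped in nvidia/nvtx/lib/ (path assumption).
    nvtx = ctypes.CDLL("libnvToolsExt.so.1")
    nvtx.nvtxRangePushA.argtypes = [ctypes.c_char_p]
    nvtx.nvtxRangePushA.restype = ctypes.c_int
    nvtx.nvtxRangePop.restype = ctypes.c_int

    nvtx.nvtxRangePushA(b"my-annotated-region")  # open a named range
    try:
        pass  # ... work to be profiled shows up under this range in the profiler ...
    finally:
        nvtx.nvtxRangePop()  # close the innermost open range
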
llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/RECORD
ADDED
@@ -0,0 +1,36 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-310.pyc,,
+nvidia/nvtx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/nvtx/__pycache__/__init__.cpython-310.pyc,,
+nvidia/nvtx/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/nvtx/include/__pycache__/__init__.cpython-310.pyc,,
+nvidia/nvtx/include/nvToolsExt.h,sha256=OiT6v1G2-vlkYnpDQZjiGT1O-THDyk1gw2021qMRvQM,53680
+nvidia/nvtx/include/nvToolsExtCuda.h,sha256=UDA1pbmvoRFmlJ11Et9tIMEztOtOVw-10mO27Q6K8jg,6009
+nvidia/nvtx/include/nvToolsExtCudaRt.h,sha256=6IbgdRGObly53jzRqvsZ4FQoTrXJOJwSyCOLuXr9ncA,5192
+nvidia/nvtx/include/nvToolsExtOpenCL.h,sha256=gETZH9ch_o6MYE_BYQ2pj9SSuxyAo1H4ptmRK-DMWSo,8360
+nvidia/nvtx/include/nvToolsExtSync.h,sha256=wqONIiycUPaUUCzQBmCippilgKt8sOL9tpzG773u0nY,14562
+nvidia/nvtx/include/nvtx3/nvToolsExt.h,sha256=TFEF3fx1043EwMdbS7FqvvavwK0koZeGrIOAsCrB12s,52247
+nvidia/nvtx/include/nvtx3/nvToolsExtCuda.h,sha256=4ZbZHUMcmHRf4SdKB7nH0E3uHd_9ZhZBuwuWPItK-Vs,6204
+nvidia/nvtx/include/nvtx3/nvToolsExtCudaRt.h,sha256=boW0zdYobNFFE9wwxCyzBGBLcSGtdbQ5osKjQGNC2E8,5393
+nvidia/nvtx/include/nvtx3/nvToolsExtOpenCL.h,sha256=RPfsZl3lHAPIOCzTipmz07-vaiIO4cxelcx12EjB2L0,8563
+nvidia/nvtx/include/nvtx3/nvToolsExtSync.h,sha256=C-HIVBaupxYom3BqMggQ_ePq1bxFhw8kXsOfYJKBWrI,14756
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImpl.h,sha256=jEnYF3MyLsD72euw2It3Bz0X0GK4Xv_htEd8BeIrPjY,23333
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCore.h,sha256=sYpWqZfYrjsMddxtezPX3qSTIbAOn4dlEoLiYQ9M2nM,9756
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCudaRt_v3.h,sha256=SoaiprvsI80yLmEAnlFX0iFufv6RtKjjMMrVwQZjjQI,4775
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCuda_v3.h,sha256=IEor-ISqComCRGVDdIzKBLU3eWCuDI0Igqz-eRKKcvg,5550
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplOpenCL_v3.h,sha256=iPR2x74bJE3plFQBT9FWGBaTm4sC-Pll6WAjpKRnz7g,8275
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplSync_v3.h,sha256=TqwQfEUVbwc58bpHioE13NMweFhOuHXNql65BnLzhvc,5022
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInit.h,sha256=foajOFacvLGx3BN5ntw5v8o4J3OY4hqkVZE5ZC0x3e4,14716
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInitDecls.h,sha256=-Qyxcy9CDXOBhEtYZ8L7iYd6daJ9aCeyQM48X0BafMM,9361
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInitDefs.h,sha256=dLhOV4knhNrmT2DnUNzXreOt_Qc6GAa3yIlmqJFCeVI,35432
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxLinkOnce.h,sha256=Jp-z6LTz_p8fKRulcFfdcskIxzcZ6ybbHkGB9mpJa2M,3863
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxTypes.h,sha256=jkbCwyvIP1G-Ef8SwYp4kDi69hjZbzaxKSk7ScgrNI8,17352
+nvidia/nvtx/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/nvtx/lib/__pycache__/__init__.cpython-310.pyc,,
+nvidia/nvtx/lib/libnvToolsExt.so.1,sha256=hH148nXIzJdEKieAcyBL3BoACf_CVZv3JIxw2SEF39w,40136
+nvidia_nvtx_cu12-12.1.105.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_nvtx_cu12-12.1.105.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+nvidia_nvtx_cu12-12.1.105.dist-info/METADATA,sha256=LP0Xeqykb8k4yxR2_JzTBqGwxALQERIJbbmP1k6-Z3Y,1660
+nvidia_nvtx_cu12-12.1.105.dist-info/RECORD,,
+nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+nvidia_nvtx_cu12-12.1.105.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
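
Editorial note: each RECORD row is `path,sha256=<digest>,<size>`, where the digest is the file's SHA-256 hash encoded as URL-safe base64 with the `=` padding stripped, per the wheel spec. A minimal verification sketch for one of the entries above; the site-packages prefix is an assumption about where this environment is unpacked.

    import base64
    import hashlib
    from pathlib import Path

    site = Path("llmeval-env/lib/python3.10/site-packages")  # assumed install root
    rel = "nvidia/nvtx/lib/libnvToolsExt.so.1"
    expected_hash = "hH148nXIzJdEKieAcyBL3BoACf_CVZv3JIxw2SEF39w"
    expected_size = 40136

    data = (site / rel).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    assert digest.decode() == expected_hash  # hash column matches file contents
    assert len(data) == expected_size        # size column matches file length
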
llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux1_x86_64
+
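
Editorial note: the Tag line is an interpreter-abi-platform triple. "py3-none" means any Python 3 with no ABI dependence (there are no compiled extension modules), while "manylinux1_x86_64" still pins the wheel to Linux/x86_64 because of the bundled libnvToolsExt.so.1. A sketch decoding it with the third-party `packaging` library:

    from packaging.tags import parse_tag

    # parse_tag expands a compressed tag string into its concrete tag set
    for tag in parse_tag("py3-none-manylinux1_x86_64"):
        print(tag.interpreter, tag.abi, tag.platform)  # py3 none manylinux1_x86_64
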
llmeval-env/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+nvidia
llmeval-env/lib/python3.10/site-packages/pandas/__init__.py
ADDED
@@ -0,0 +1,367 @@
+from __future__ import annotations
+
+import os
+import warnings
+
+__docformat__ = "restructuredtext"
+
+# Let users know if they're missing any of our hard dependencies
+_hard_dependencies = ("numpy", "pytz", "dateutil")
+_missing_dependencies = []
+
+for _dependency in _hard_dependencies:
+    try:
+        __import__(_dependency)
+    except ImportError as _e:  # pragma: no cover
+        _missing_dependencies.append(f"{_dependency}: {_e}")
+
+if _missing_dependencies:  # pragma: no cover
+    raise ImportError(
+        "Unable to import required dependencies:\n" + "\n".join(_missing_dependencies)
+    )
+del _hard_dependencies, _dependency, _missing_dependencies
+
+try:
+    # numpy compat
+    from pandas.compat import (
+        is_numpy_dev as _is_numpy_dev,  # pyright: ignore[reportUnusedImport] # noqa: F401
+    )
+except ImportError as _err:  # pragma: no cover
+    _module = _err.name
+    raise ImportError(
+        f"C extension: {_module} not built. If you want to import "
+        "pandas from the source directory, you may need to run "
+        "'python setup.py build_ext' to build the C extensions first."
+    ) from _err
+
+from pandas._config import (
+    get_option,
+    set_option,
+    reset_option,
+    describe_option,
+    option_context,
+    options,
+)
+
+# let init-time option registration happen
+import pandas.core.config_init  # pyright: ignore[reportUnusedImport] # noqa: F401
+
+from pandas.core.api import (
+    # dtype
+    ArrowDtype,
+    Int8Dtype,
+    Int16Dtype,
+    Int32Dtype,
+    Int64Dtype,
+    UInt8Dtype,
+    UInt16Dtype,
+    UInt32Dtype,
+    UInt64Dtype,
+    Float32Dtype,
+    Float64Dtype,
+    CategoricalDtype,
+    PeriodDtype,
+    IntervalDtype,
+    DatetimeTZDtype,
+    StringDtype,
+    BooleanDtype,
+    # missing
+    NA,
+    isna,
+    isnull,
+    notna,
+    notnull,
+    # indexes
+    Index,
+    CategoricalIndex,
+    RangeIndex,
+    MultiIndex,
+    IntervalIndex,
+    TimedeltaIndex,
+    DatetimeIndex,
+    PeriodIndex,
+    IndexSlice,
+    # tseries
+    NaT,
+    Period,
+    period_range,
+    Timedelta,
+    timedelta_range,
+    Timestamp,
+    date_range,
+    bdate_range,
+    Interval,
+    interval_range,
+    DateOffset,
+    # conversion
+    to_numeric,
+    to_datetime,
+    to_timedelta,
+    # misc
+    Flags,
+    Grouper,
+    factorize,
+    unique,
+    value_counts,
+    NamedAgg,
+    array,
+    Categorical,
+    set_eng_float_format,
+    Series,
+    DataFrame,
+)
+
+from pandas.core.dtypes.dtypes import SparseDtype
+
+from pandas.tseries.api import infer_freq
+from pandas.tseries import offsets
+
+from pandas.core.computation.api import eval
+
+from pandas.core.reshape.api import (
+    concat,
+    lreshape,
+    melt,
+    wide_to_long,
+    merge,
+    merge_asof,
+    merge_ordered,
+    crosstab,
+    pivot,
+    pivot_table,
+    get_dummies,
+    from_dummies,
+    cut,
+    qcut,
+)
+
+from pandas import api, arrays, errors, io, plotting, tseries
+from pandas import testing
+from pandas.util._print_versions import show_versions
+
+from pandas.io.api import (
+    # excel
+    ExcelFile,
+    ExcelWriter,
+    read_excel,
+    # parsers
+    read_csv,
+    read_fwf,
+    read_table,
+    # pickle
+    read_pickle,
+    to_pickle,
+    # pytables
+    HDFStore,
+    read_hdf,
+    # sql
+    read_sql,
+    read_sql_query,
+    read_sql_table,
+    # misc
+    read_clipboard,
+    read_parquet,
+    read_orc,
+    read_feather,
+    read_gbq,
+    read_html,
+    read_xml,
+    read_json,
+    read_stata,
+    read_sas,
+    read_spss,
+)
+
+from pandas.io.json._normalize import json_normalize
+
+from pandas.util._tester import test
+
+# use the closest tagged version if possible
+_built_with_meson = False
+try:
+    from pandas._version_meson import (  # pyright: ignore [reportMissingImports]
+        __version__,
+        __git_version__,
+    )
+
+    _built_with_meson = True
+except ImportError:
+    from pandas._version import get_versions
+
+    v = get_versions()
+    __version__ = v.get("closest-tag", v["version"])
+    __git_version__ = v.get("full-revisionid")
+    del get_versions, v
+
+# GH#55043 - deprecation of the data_manager option
+if "PANDAS_DATA_MANAGER" in os.environ:
+    warnings.warn(
+        "The env variable PANDAS_DATA_MANAGER is set. The data_manager option is "
+        "deprecated and will be removed in a future version. Only the BlockManager "
+        "will be available. Unset this environment variable to silence this warning.",
+        FutureWarning,
+        stacklevel=2,
+    )
+
+del warnings, os
+
+# module level doc-string
+__doc__ = """
+pandas - a powerful data analysis and manipulation library for Python
+=====================================================================
+
+**pandas** is a Python package providing fast, flexible, and expressive data
+structures designed to make working with "relational" or "labeled" data both
+easy and intuitive. It aims to be the fundamental high-level building block for
+doing practical, **real world** data analysis in Python. Additionally, it has
+the broader goal of becoming **the most powerful and flexible open source data
+analysis / manipulation tool available in any language**. It is already well on
+its way toward this goal.
+
+Main Features
+-------------
+Here are just a few of the things that pandas does well:
+
+  - Easy handling of missing data in floating point as well as non-floating
+    point data.
+  - Size mutability: columns can be inserted and deleted from DataFrame and
+    higher dimensional objects
+  - Automatic and explicit data alignment: objects can be explicitly aligned
+    to a set of labels, or the user can simply ignore the labels and let
+    `Series`, `DataFrame`, etc. automatically align the data for you in
+    computations.
+  - Powerful, flexible group by functionality to perform split-apply-combine
+    operations on data sets, for both aggregating and transforming data.
+  - Make it easy to convert ragged, differently-indexed data in other Python
+    and NumPy data structures into DataFrame objects.
+  - Intelligent label-based slicing, fancy indexing, and subsetting of large
+    data sets.
+  - Intuitive merging and joining data sets.
+  - Flexible reshaping and pivoting of data sets.
+  - Hierarchical labeling of axes (possible to have multiple labels per tick).
+  - Robust IO tools for loading data from flat files (CSV and delimited),
+    Excel files, databases, and saving/loading data from the ultrafast HDF5
+    format.
+  - Time series-specific functionality: date range generation and frequency
+    conversion, moving window statistics, date shifting and lagging.
+"""
+
+# Use __all__ to let type checkers know what is part of the public API.
+# Pandas is not (yet) a py.typed library: the public API is determined
+# based on the documentation.
+__all__ = [
+    "ArrowDtype",
+    "BooleanDtype",
+    "Categorical",
+    "CategoricalDtype",
+    "CategoricalIndex",
+    "DataFrame",
+    "DateOffset",
+    "DatetimeIndex",
+    "DatetimeTZDtype",
+    "ExcelFile",
+    "ExcelWriter",
+    "Flags",
+    "Float32Dtype",
+    "Float64Dtype",
+    "Grouper",
+    "HDFStore",
+    "Index",
+    "IndexSlice",
+    "Int16Dtype",
+    "Int32Dtype",
+    "Int64Dtype",
+    "Int8Dtype",
+    "Interval",
+    "IntervalDtype",
+    "IntervalIndex",
+    "MultiIndex",
+    "NA",
+    "NaT",
+    "NamedAgg",
+    "Period",
+    "PeriodDtype",
+    "PeriodIndex",
+    "RangeIndex",
+    "Series",
+    "SparseDtype",
+    "StringDtype",
+    "Timedelta",
+    "TimedeltaIndex",
+    "Timestamp",
+    "UInt16Dtype",
+    "UInt32Dtype",
+    "UInt64Dtype",
+    "UInt8Dtype",
+    "api",
+    "array",
+    "arrays",
+    "bdate_range",
+    "concat",
+    "crosstab",
+    "cut",
+    "date_range",
+    "describe_option",
+    "errors",
+    "eval",
+    "factorize",
+    "get_dummies",
+    "from_dummies",
+    "get_option",
+    "infer_freq",
+    "interval_range",
+    "io",
+    "isna",
+    "isnull",
+    "json_normalize",
+    "lreshape",
+    "melt",
+    "merge",
+    "merge_asof",
+    "merge_ordered",
+    "notna",
+    "notnull",
+    "offsets",
+    "option_context",
+    "options",
+    "period_range",
+    "pivot",
+    "pivot_table",
+    "plotting",
+    "qcut",
+    "read_clipboard",
+    "read_csv",
+    "read_excel",
+    "read_feather",
+    "read_fwf",
+    "read_gbq",
+    "read_hdf",
+    "read_html",
+    "read_json",
+    "read_orc",
+    "read_parquet",
+    "read_pickle",
+    "read_sas",
+    "read_spss",
+    "read_sql",
+    "read_sql_query",
+    "read_sql_table",
+    "read_stata",
+    "read_table",
+    "read_xml",
+    "reset_option",
+    "set_eng_float_format",
+    "set_option",
+    "show_versions",
+    "test",
+    "testing",
+    "timedelta_range",
+    "to_datetime",
+    "to_numeric",
+    "to_pickle",
+    "to_timedelta",
+    "tseries",
+    "unique",
+    "value_counts",
+    "wide_to_long",
+]
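
Editorial note: the top of pandas/__init__.py above uses a data-driven import probe rather than plain import statements, so that all missing hard dependencies are reported in a single error instead of one at a time. A standalone sketch of that idiom, reduced from the file itself:

    # __import__ takes the module name as a string, letting the names be data.
    _required = ("numpy", "pytz", "dateutil")
    _missing = []

    for _name in _required:
        try:
            __import__(_name)
        except ImportError as exc:
            _missing.append(f"{_name}: {exc}")

    # One aggregated error listing every missing dependency at once.
    if _missing:
        raise ImportError(
            "Unable to import required dependencies:\n" + "\n".join(_missing)
        )
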
llmeval-env/lib/python3.10/site-packages/pandas/_typing.py
ADDED
@@ -0,0 +1,525 @@
+from __future__ import annotations
+
+from collections.abc import (
+    Hashable,
+    Iterator,
+    Mapping,
+    MutableMapping,
+    Sequence,
+)
+from datetime import (
+    date,
+    datetime,
+    timedelta,
+    tzinfo,
+)
+from os import PathLike
+import sys
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Literal,
+    Optional,
+    Protocol,
+    Type as type_t,
+    TypeVar,
+    Union,
+    overload,
+)
+
+import numpy as np
+
+# To prevent import cycles place any internal imports in the branch below
+# and use a string literal forward reference to it in subsequent types
+# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
+if TYPE_CHECKING:
+    import numpy.typing as npt
+
+    from pandas._libs import (
+        NaTType,
+        Period,
+        Timedelta,
+        Timestamp,
+    )
+    from pandas._libs.tslibs import BaseOffset
+
+    from pandas.core.dtypes.dtypes import ExtensionDtype
+
+    from pandas import Interval
+    from pandas.arrays import (
+        DatetimeArray,
+        TimedeltaArray,
+    )
+    from pandas.core.arrays.base import ExtensionArray
+    from pandas.core.frame import DataFrame
+    from pandas.core.generic import NDFrame
+    from pandas.core.groupby.generic import (
+        DataFrameGroupBy,
+        GroupBy,
+        SeriesGroupBy,
+    )
+    from pandas.core.indexes.base import Index
+    from pandas.core.internals import (
+        ArrayManager,
+        BlockManager,
+        SingleArrayManager,
+        SingleBlockManager,
+    )
+    from pandas.core.resample import Resampler
+    from pandas.core.series import Series
+    from pandas.core.window.rolling import BaseWindow
+
+    from pandas.io.formats.format import EngFormatter
+    from pandas.tseries.holiday import AbstractHolidayCalendar
+
+    ScalarLike_co = Union[
+        int,
+        float,
+        complex,
+        str,
+        bytes,
+        np.generic,
+    ]
+
+    # numpy compatible types
+    NumpyValueArrayLike = Union[ScalarLike_co, npt.ArrayLike]
+    # Name "npt._ArrayLikeInt_co" is not defined [name-defined]
+    NumpySorter = Optional[npt._ArrayLikeInt_co]  # type: ignore[name-defined]
+
+    from typing import SupportsIndex
+
+    if sys.version_info >= (3, 10):
+        from typing import TypeGuard  # pyright: ignore[reportUnusedImport]
+    else:
+        from typing_extensions import TypeGuard  # pyright: ignore[reportUnusedImport]
+
+    if sys.version_info >= (3, 11):
+        from typing import Self  # pyright: ignore[reportUnusedImport]
+    else:
+        from typing_extensions import Self  # pyright: ignore[reportUnusedImport]
+else:
+    npt: Any = None
+    Self: Any = None
+    TypeGuard: Any = None
+
+HashableT = TypeVar("HashableT", bound=Hashable)
+MutableMappingT = TypeVar("MutableMappingT", bound=MutableMapping)
+
+# array-like
+
+ArrayLike = Union["ExtensionArray", np.ndarray]
+AnyArrayLike = Union[ArrayLike, "Index", "Series"]
+TimeArrayLike = Union["DatetimeArray", "TimedeltaArray"]
+
+# list-like
+
+# from https://github.com/hauntsaninja/useful_types
+# includes Sequence-like objects but excludes str and bytes
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+class SequenceNotStr(Protocol[_T_co]):
+    @overload
+    def __getitem__(self, index: SupportsIndex, /) -> _T_co:
+        ...
+
+    @overload
+    def __getitem__(self, index: slice, /) -> Sequence[_T_co]:
+        ...
+
+    def __contains__(self, value: object, /) -> bool:
+        ...
+
+    def __len__(self) -> int:
+        ...
+
+    def __iter__(self) -> Iterator[_T_co]:
+        ...
+
+    def index(self, value: Any, /, start: int = 0, stop: int = ...) -> int:
+        ...
+
+    def count(self, value: Any, /) -> int:
+        ...
+
+    def __reversed__(self) -> Iterator[_T_co]:
+        ...
+
+
+ListLike = Union[AnyArrayLike, SequenceNotStr, range]
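
Editorial note: a sketch of what SequenceNotStr buys at type-check time. A function that wants "a sequence of labels, but not a bare string" annotates with SequenceNotStr[str]; list and tuple match the protocol structurally, while str does not (per the useful_types project this is adapted from, str's methods are narrower than the protocol's, e.g. its __contains__ accepts only str rather than object). The function name below is hypothetical.

    from pandas._typing import SequenceNotStr

    def first_label(labels: SequenceNotStr[str]) -> str:
        # positional indexing is allowed by the protocol's __getitem__ overloads
        return labels[0]

    first_label(["a", "b"])  # OK for mypy/pyright
    first_label(("a", "b"))  # OK
    first_label("ab")        # runs, but a static type checker rejects it
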
151 |
+
|
152 |
+
# scalars
|
153 |
+
|
154 |
+
PythonScalar = Union[str, float, bool]
|
155 |
+
DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"]
|
156 |
+
PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"]
|
157 |
+
Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, date]
|
158 |
+
IntStrT = TypeVar("IntStrT", bound=Union[int, str])
|
159 |
+
|
160 |
+
|
161 |
+
# timestamp and timedelta convertible types
|
162 |
+
|
163 |
+
TimestampConvertibleTypes = Union[
|
164 |
+
"Timestamp", date, np.datetime64, np.int64, float, str
|
165 |
+
]
|
166 |
+
TimestampNonexistent = Union[
|
167 |
+
Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta
|
168 |
+
]
|
169 |
+
TimedeltaConvertibleTypes = Union[
|
170 |
+
"Timedelta", timedelta, np.timedelta64, np.int64, float, str
|
171 |
+
]
|
172 |
+
Timezone = Union[str, tzinfo]
|
173 |
+
|
174 |
+
ToTimestampHow = Literal["s", "e", "start", "end"]
|
175 |
+
|
176 |
+
# NDFrameT is stricter and ensures that the same subclass of NDFrame always is
|
177 |
+
# used. E.g. `def func(a: NDFrameT) -> NDFrameT: ...` means that if a
|
178 |
+
# Series is passed into a function, a Series is always returned and if a DataFrame is
|
179 |
+
# passed in, a DataFrame is always returned.
|
180 |
+
NDFrameT = TypeVar("NDFrameT", bound="NDFrame")
|
181 |
+
|
182 |
+
NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index")
|
183 |
+
|
184 |
+
AxisInt = int
|
185 |
+
Axis = Union[AxisInt, Literal["index", "columns", "rows"]]
|
186 |
+
IndexLabel = Union[Hashable, Sequence[Hashable]]
|
187 |
+
Level = Hashable
|
188 |
+
Shape = tuple[int, ...]
|
189 |
+
Suffixes = tuple[Optional[str], Optional[str]]
|
190 |
+
Ordered = Optional[bool]
|
191 |
+
JSONSerializable = Optional[Union[PythonScalar, list, dict]]
|
192 |
+
Frequency = Union[str, "BaseOffset"]
|
193 |
+
Axes = ListLike
|
194 |
+
|
195 |
+
RandomState = Union[
|
196 |
+
int,
|
197 |
+
np.ndarray,
|
198 |
+
np.random.Generator,
|
199 |
+
np.random.BitGenerator,
|
200 |
+
np.random.RandomState,
|
201 |
+
]
|
202 |
+
|
203 |
+
# dtypes
|
204 |
+
NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]]
|
205 |
+
Dtype = Union["ExtensionDtype", NpDtype]
|
206 |
+
AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"]
|
207 |
+
# DtypeArg specifies all allowable dtypes in a functions its dtype argument
|
208 |
+
DtypeArg = Union[Dtype, dict[Hashable, Dtype]]
|
209 |
+
DtypeObj = Union[np.dtype, "ExtensionDtype"]
|
210 |
+
|
211 |
+
# converters
|
212 |
+
ConvertersArg = dict[Hashable, Callable[[Dtype], Dtype]]
|
213 |
+
|
214 |
+
# parse_dates
|
215 |
+
ParseDatesArg = Union[
|
216 |
+
bool, list[Hashable], list[list[Hashable]], dict[Hashable, list[Hashable]]
|
217 |
+
]
|
218 |
+
|
219 |
+
# For functions like rename that convert one label to another
|
220 |
+
Renamer = Union[Mapping[Any, Hashable], Callable[[Any], Hashable]]
|
221 |
+
|
222 |
+
# to maintain type information across generic functions and parametrization
|
223 |
+
T = TypeVar("T")
|
224 |
+
|
225 |
+
# used in decorators to preserve the signature of the function it decorates
|
226 |
+
# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
|
227 |
+
FuncType = Callable[..., Any]
|
228 |
+
F = TypeVar("F", bound=FuncType)
|
229 |
+
|
230 |
+
# types of vectorized key functions for DataFrame::sort_values and
|
231 |
+
# DataFrame::sort_index, among others
|
232 |
+
ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]]
|
233 |
+
IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]]
|
234 |
+
|
235 |
+
# types of `func` kwarg for DataFrame.aggregate and Series.aggregate
|
236 |
+
AggFuncTypeBase = Union[Callable, str]
|
237 |
+
AggFuncTypeDict = MutableMapping[
|
238 |
+
Hashable, Union[AggFuncTypeBase, list[AggFuncTypeBase]]
|
239 |
+
]
|
240 |
+
AggFuncType = Union[
|
241 |
+
AggFuncTypeBase,
|
242 |
+
list[AggFuncTypeBase],
|
243 |
+
AggFuncTypeDict,
|
244 |
+
]
|
245 |
+
AggObjType = Union[
|
246 |
+
"Series",
|
247 |
+
"DataFrame",
|
248 |
+
"GroupBy",
|
249 |
+
"SeriesGroupBy",
|
250 |
+
"DataFrameGroupBy",
|
251 |
+
"BaseWindow",
|
252 |
+
"Resampler",
|
253 |
+
]
|
254 |
+
|
255 |
+
PythonFuncType = Callable[[Any], Any]
|
256 |
+
|
257 |
+
# filenames and file-like-objects
|
258 |
+
AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True)
|
259 |
+
AnyStr_contra = TypeVar("AnyStr_contra", str, bytes, contravariant=True)
|
260 |
+
|
261 |
+
|
262 |
+
class BaseBuffer(Protocol):
    @property
    def mode(self) -> str:
        # for _get_filepath_or_buffer
        ...

    def seek(self, __offset: int, __whence: int = ...) -> int:
        # with one argument: gzip.GzipFile, bz2.BZ2File
        # with two arguments: zip.ZipFile, read_sas
        ...

    def seekable(self) -> bool:
        # for bz2.BZ2File
        ...

    def tell(self) -> int:
        # for zip.ZipFile, read_stata, to_stata
        ...


class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]):
    def read(self, __n: int = ...) -> AnyStr_co:
        # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
        ...


class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]):
    def write(self, __b: AnyStr_contra) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...

    def flush(self) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...


class ReadPickleBuffer(ReadBuffer[bytes], Protocol):
    def readline(self) -> bytes:
        ...


class WriteExcelBuffer(WriteBuffer[bytes], Protocol):
    def truncate(self, size: int | None = ...) -> int:
        ...


class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol):
    def __iter__(self) -> Iterator[AnyStr_co]:
        # for engine=python
        ...

    def fileno(self) -> int:
        # for _MMapWrapper
        ...

    def readline(self) -> AnyStr_co:
        # for engine=python
        ...

    @property
    def closed(self) -> bool:
        # for engine=pyarrow
        ...

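Note: these buffer Protocols are structural; the inline comments name the concrete file objects each method exists for, and no registration or inheritance is needed. A rough usage sketch (annotations quoted, so they describe intent for a type checker and enforce nothing at runtime):

import gzip
import io

raw = io.BytesIO()
with gzip.GzipFile(fileobj=raw, mode="wb") as gz:
    writer: "WriteBuffer[bytes]" = gz   # write/flush line up structurally
    writer.write(b"payload")
raw.seek(0)
with gzip.GzipFile(fileobj=raw, mode="rb") as gz:
    reader: "ReadBuffer[bytes]" = gz    # read lines up structurally
    assert reader.read() == b"payload"
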
FilePath = Union[str, "PathLike[str]"]

# for arbitrary kwargs passed during reading/writing files
StorageOptions = Optional[dict[str, Any]]


# compression keywords and compression
CompressionDict = dict[str, Any]
CompressionOptions = Optional[
    Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict]
]

# types in DataFrameFormatter
FormattersType = Union[
    list[Callable], tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
ColspaceType = Mapping[Hashable, Union[str, int]]
FloatFormatType = Union[str, Callable, "EngFormatter"]
ColspaceArgType = Union[
    str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]]
]

# Arguments for fillna()
FillnaOptions = Literal["backfill", "bfill", "ffill", "pad"]
InterpolateOptions = Literal[
    "linear",
    "time",
    "index",
    "values",
    "nearest",
    "zero",
    "slinear",
    "quadratic",
    "cubic",
    "barycentric",
    "polynomial",
    "krogh",
    "piecewise_polynomial",
    "spline",
    "pchip",
    "akima",
    "cubicspline",
    "from_derivatives",
]

# internals
Manager = Union[
    "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager"
]
SingleManager = Union["SingleArrayManager", "SingleBlockManager"]
Manager2D = Union["ArrayManager", "BlockManager"]

# indexing
# PositionalIndexer -> valid 1D positional indexer, e.g. can pass
# to ndarray.__getitem__
# ScalarIndexer is for a single value as the index
# SequenceIndexer is for list like or slices (but not tuples)
# PositionalIndexerTuple extends the PositionalIndexer for 2D arrays
# These are used in various __getitem__ overloads
# TODO(typing#684): add Ellipsis, see
# https://github.com/python/typing/issues/684#issuecomment-548203158
# https://bugs.python.org/issue41810
# Using List[int] here rather than Sequence[int] to disallow tuples.
ScalarIndexer = Union[int, np.integer]
SequenceIndexer = Union[slice, list[int], np.ndarray]
PositionalIndexer = Union[ScalarIndexer, SequenceIndexer]
PositionalIndexerTuple = tuple[PositionalIndexer, PositionalIndexer]
PositionalIndexer2D = Union[PositionalIndexer, PositionalIndexerTuple]
if TYPE_CHECKING:
    TakeIndexer = Union[Sequence[int], Sequence[np.integer], npt.NDArray[np.integer]]
else:
    TakeIndexer = Any

# Shared by functions such as drop and astype
IgnoreRaise = Literal["ignore", "raise"]

# Windowing rank methods
WindowingRankType = Literal["average", "min", "max"]

# read_csv engines
CSVEngine = Literal["c", "python", "pyarrow", "python-fwf"]

# read_json engines
JSONEngine = Literal["ujson", "pyarrow"]

# read_xml parsers
XMLParsers = Literal["lxml", "etree"]

# read_html flavors
HTMLFlavors = Literal["lxml", "html5lib", "bs4"]

# Interval closed type
IntervalLeftRight = Literal["left", "right"]
IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]]

# datetime and NaTType
DatetimeNaTType = Union[datetime, "NaTType"]
DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]]

# sort_index
SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
NaPosition = Literal["first", "last"]

# Arguments for nsmallest and nlargest
NsmallestNlargestKeep = Literal["first", "last", "all"]

# quantile interpolation
QuantileInterpolation = Literal["linear", "lower", "higher", "midpoint", "nearest"]

# plotting
PlottingOrientation = Literal["horizontal", "vertical"]

# dropna
AnyAll = Literal["any", "all"]

# merge
MergeHow = Literal["left", "right", "inner", "outer", "cross"]
MergeValidate = Literal[
    "one_to_one",
    "1:1",
    "one_to_many",
    "1:m",
    "many_to_one",
    "m:1",
    "many_to_many",
    "m:m",
]

# join
JoinHow = Literal["left", "right", "inner", "outer"]
JoinValidate = Literal[
    "one_to_one",
    "1:1",
    "one_to_many",
    "1:m",
    "many_to_one",
    "m:1",
    "many_to_many",
    "m:m",
]

# reindex
ReindexMethod = Union[FillnaOptions, Literal["nearest"]]

MatplotlibColor = Union[str, Sequence[float]]
TimeGrouperOrigin = Union[
    "Timestamp", Literal["epoch", "start", "start_day", "end", "end_day"]
]
TimeAmbiguous = Union[Literal["infer", "NaT", "raise"], "npt.NDArray[np.bool_]"]
TimeNonexistent = Union[
    Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta
]
DropKeep = Literal["first", "last", False]
CorrelationMethod = Union[
    Literal["pearson", "kendall", "spearman"], Callable[[np.ndarray, np.ndarray], float]
]
AlignJoin = Literal["outer", "inner", "left", "right"]
DtypeBackend = Literal["pyarrow", "numpy_nullable"]

TimeUnit = Literal["s", "ms", "us", "ns"]
OpenFileErrors = Literal[
    "strict",
    "ignore",
    "replace",
    "surrogateescape",
    "xmlcharrefreplace",
    "backslashreplace",
    "namereplace",
]

# update
UpdateJoin = Literal["left"]

# applymap
NaAction = Literal["ignore"]

# from_dict
FromDictOrient = Literal["columns", "index", "tight"]

# to_gbq
ToGbqIfexist = Literal["fail", "replace", "append"]

# to_stata
ToStataByteorder = Literal[">", "<", "little", "big"]

# ExcelWriter
ExcelWriterIfSheetExists = Literal["error", "new", "replace", "overlay"]

# Offsets
OffsetCalendar = Union[np.busdaycalendar, "AbstractHolidayCalendar"]

# read_csv: usecols
UsecolsArgType = Union[
    SequenceNotStr[Hashable],
    range,
    AnyArrayLike,
    Callable[[HashableT], bool],
    None,
]
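Note: a hypothetical signature sketch (read_table is not a pandas function) showing how the I/O aliases defined above typically compose in reader entry points; annotations are quoted so the snippet is self-contained:

def read_table(
    source: "FilePath | ReadBuffer[bytes]",       # path-like or open buffer
    compression: "CompressionOptions" = "infer",  # keyword or options dict
    storage_options: "StorageOptions" = None,     # arbitrary backend kwargs
    engine: "CSVEngine" = "c",
) -> None:
    ...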
llmeval-env/lib/python3.10/site-packages/pandas/_version.py
ADDED
@@ -0,0 +1,692 @@
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain.
# Generated by versioneer-0.28
# https://github.com/python-versioneer/python-versioneer

"""Git implementation of _version.py."""

import errno
import functools
import os
import re
import subprocess
import sys
from typing import Callable


def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords


class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = "v"
    cfg.parentdir_prefix = "pandas-"
    cfg.versionfile_source = "pandas/_version.py"
    cfg.verbose = False
    return cfg


class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""


LONG_VERSION_PY: dict[str, str] = {}
HANDLERS: dict[str, dict[str, Callable]] = {}


def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f

    return decorate


def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s)."""
    assert isinstance(commands, list)
    process = None

    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [command] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
                **popen_kwargs,
            )
            break
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print(f"unable to run {dispcmd}")
                print(e)
            return None, None
    else:
        if verbose:
            print(f"unable to find command, tried {commands}")
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print(f"unable to run {dispcmd} (error)")
            print(f"stdout was {stdout}")
        return None, process.returncode
    return stdout, process.returncode

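Note: run_command is the primitive every VCS query below goes through; its contract is a (stdout, returncode) pair, with stdout None when the executable is missing or the call fails. A quick sketch (the output naturally depends on the machine it runs on):

out, rc = run_command(["git"], ["rev-parse", "--short", "HEAD"], cwd=".")
if rc == 0:
    print(f"HEAD is at {out}")
else:
    # callers in this module typically raise NotThisMethod at this point
    print("git missing or not inside a checkout")
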
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    rootdirs = []

    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {
                "version": dirname[len(parentdir_prefix) :],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        rootdirs.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print(
            f"Tried directories {str(rootdirs)} \
but none started with prefix {parentdir_prefix}"
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")


@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        with open(versionfile_abs, encoding="utf-8") as fobj:
            for line in fobj:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except OSError:
        pass
    return keywords


@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]

        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r"\d", r)}
        if verbose:
            print(f"discarding '{','.join(refs - tags)}', no digits")
    if verbose:
        print(f"likely tags: {','.join(sorted(tags))}")
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix) :]
            # Filter out refs that exactly match prefix or that don't start
            # with a number once the prefix is stripped (mostly a concern
            # when prefix is '')
            if not re.match(r"\d", r):
                continue
            if verbose:
                print(f"picking {r}")
            return {
                "version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False,
                "error": None,
                "date": date,
            }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }


@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
    env = os.environ.copy()
    env.pop("GIT_DIR", None)
    runner = functools.partial(runner, env=env)

    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose)
    if rc != 0:
        if verbose:
            print(f"Directory {root} not under git control")
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            f"{tag_prefix}[[:digit:]]*",
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()

    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")

        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)

        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]

    pieces["branch"] = branch_name

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = f"unable to parse git-describe output: '{describe_out}'"
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces[
                "error"
            ] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
        pieces["distance"] = len(out.split())  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces

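Note: git_pieces_from_vcs returns a plain dict that the render_* functions below consume. Illustrative values for a checkout two commits past tag v2.2.2 with uncommitted changes (every value here is made up for the example):

pieces = {
    "long": "d9cdd2ee5a58015ef6f4d15c7226110c9aab8140",  # full HEAD sha
    "short": "d9cdd2e",
    "branch": "main",
    "closest-tag": "2.2.2",  # tag_prefix "v" already stripped
    "distance": 2,           # commits since the tag
    "dirty": True,           # uncommitted changes present
    "date": "2024-04-10T16:06:47-0700",
    "error": None,
}
# render_pep440(pieces) would yield "2.2.2+2.gd9cdd2e.dirty"
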
def plus_or_dot(pieces) -> str:
    """Return a + if we don't already have one, else return a ."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+"


def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += f"{pieces['distance']}.g{pieces['short']}"
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}"
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += f"{pieces['distance']}.g{pieces['short']}"
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0"
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += f"+untagged.{pieces['distance']}.g{pieces['short']}"
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def pep440_split_post(ver):
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or -1 if no post-release segment is present).
    """
    vc = str.split(ver, ".post")
    return vc[0], int(vc[1] or 0) if len(vc) == 2 else None


def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    if pieces["closest-tag"]:
        if pieces["distance"]:
            # update the post release segment
            tag_version, post_version = pep440_split_post(pieces["closest-tag"])
            rendered = tag_version
            if post_version is not None:
                rendered += f".post{post_version + 1}.dev{pieces['distance']}"
            else:
                rendered += f".post0.dev{pieces['distance']}"
        else:
            # no commits, use the tag as the version
            rendered = pieces["closest-tag"]
    else:
        # exception #1
        rendered = f"0.post0.dev{pieces['distance']}"
    return rendered


def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += f".post{pieces['distance']}"
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += f"g{pieces['short']}"
    else:
        # exception #1
        rendered = f"0.post{pieces['distance']}"
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += f"+g{pieces['short']}"
    return rendered


def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += f".post{pieces['distance']}"
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += f"g{pieces['short']}"
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = f"0.post{pieces['distance']}"
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += f"+g{pieces['short']}"
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += f"0.post{pieces['distance']}"
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = f"0.post{pieces['distance']}"
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered


def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += f"-{pieces['distance']}-g{pieces['short']}"
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += f"-{pieces['distance']}-g{pieces['short']}"
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-branch":
        rendered = render_pep440_branch(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-post-branch":
        rendered = render_pep440_post_branch(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError(f"unknown style '{style}'")

    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }


def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.

    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }

    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
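Note: get_versions() is the module's public entry point; whichever strategy succeeds (expanded keywords, git describe, parent directory name), the returned dict has the same shape. A consumption sketch:

info = get_versions()
print(info["version"])  # e.g. "2.2.2" in a release, "0+unknown" on failure
if info["error"]:
    print(f"version lookup failed: {info['error']}")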
llmeval-env/lib/python3.10/site-packages/pandas/_version_meson.py
ADDED
@@ -0,0 +1,2 @@
__version__="2.2.2"
__git_version__="d9cdd2ee5a58015ef6f4d15c7226110c9aab8140"
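Note: in a Meson-built wheel such as this one, this two-line module carries the reported version; presumably pandas/__init__.py prefers it over the versioneer machinery above when it is present. Reading it directly:

from pandas._version_meson import __git_version__, __version__

print(__version__)      # "2.2.2"
print(__git_version__)  # commit the wheel was built from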
llmeval-env/lib/python3.10/site-packages/pandas/conftest.py
ADDED
@@ -0,0 +1,1965 @@
"""
This file is very long and growing, but it was decided to not split it yet, as
it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989

Instead of splitting it was decided to define sections here:
- Configuration / Settings
- Autouse fixtures
- Common arguments
- Missing values & co.
- Classes
- Indices
- Series'
- DataFrames
- Operators & Operations
- Data sets/files
- Time zones
- Dtypes
- Misc
"""
from __future__ import annotations

from collections import abc
from datetime import (
    date,
    datetime,
    time,
    timedelta,
    timezone,
)
from decimal import Decimal
import operator
import os
from typing import (
    TYPE_CHECKING,
    Callable,
)

from dateutil.tz import (
    tzlocal,
    tzutc,
)
import hypothesis
from hypothesis import strategies as st
import numpy as np
import pytest
from pytz import (
    FixedOffset,
    utc,
)

from pandas._config.config import _get_option

import pandas.util._test_decorators as td

from pandas.core.dtypes.dtypes import (
    DatetimeTZDtype,
    IntervalDtype,
)

import pandas as pd
from pandas import (
    CategoricalIndex,
    DataFrame,
    Interval,
    IntervalIndex,
    Period,
    RangeIndex,
    Series,
    Timedelta,
    Timestamp,
    date_range,
    period_range,
    timedelta_range,
)
import pandas._testing as tm
from pandas.core import ops
from pandas.core.indexes.api import (
    Index,
    MultiIndex,
)
from pandas.util.version import Version

if TYPE_CHECKING:
    from collections.abc import (
        Hashable,
        Iterator,
    )

try:
    import pyarrow as pa
except ImportError:
    has_pyarrow = False
else:
    del pa
    has_pyarrow = True

import zoneinfo

try:
    zoneinfo.ZoneInfo("UTC")
except zoneinfo.ZoneInfoNotFoundError:
    zoneinfo = None  # type: ignore[assignment]


# ----------------------------------------------------------------
# Configuration / Settings
# ----------------------------------------------------------------
# pytest


def pytest_addoption(parser) -> None:
    parser.addoption(
        "--no-strict-data-files",
        action="store_false",
        help="Don't fail if a test is skipped for missing data file.",
    )


def ignore_doctest_warning(item: pytest.Item, path: str, message: str) -> None:
    """Ignore doctest warning.

    Parameters
    ----------
    item : pytest.Item
        pytest test item.
    path : str
        Module path to Python object, e.g. "pandas.core.frame.DataFrame.append". A
        warning will be filtered when item.name ends with the given path. So it is
        sufficient to specify e.g. "DataFrame.append".
    message : str
        Message to be filtered.
    """
    if item.name.endswith(path):
        item.add_marker(pytest.mark.filterwarnings(f"ignore:{message}"))


def pytest_collection_modifyitems(items, config) -> None:
    is_doctest = config.getoption("--doctest-modules") or config.getoption(
        "--doctest-cython", default=False
    )

    # Warnings from doctests that can be ignored; place reason in comment above.
    # Each entry specifies (path, message) - see the ignore_doctest_warning function
    ignored_doctest_warnings = [
        ("is_int64_dtype", "is_int64_dtype is deprecated"),
        ("is_interval_dtype", "is_interval_dtype is deprecated"),
        ("is_period_dtype", "is_period_dtype is deprecated"),
        ("is_datetime64tz_dtype", "is_datetime64tz_dtype is deprecated"),
        ("is_categorical_dtype", "is_categorical_dtype is deprecated"),
        ("is_sparse", "is_sparse is deprecated"),
        ("DataFrameGroupBy.fillna", "DataFrameGroupBy.fillna is deprecated"),
        ("NDFrame.replace", "The 'method' keyword"),
        ("NDFrame.replace", "Series.replace without 'value'"),
        ("NDFrame.clip", "Downcasting behavior in Series and DataFrame methods"),
        ("Series.idxmin", "The behavior of Series.idxmin"),
        ("Series.idxmax", "The behavior of Series.idxmax"),
        ("SeriesGroupBy.fillna", "SeriesGroupBy.fillna is deprecated"),
        ("SeriesGroupBy.idxmin", "The behavior of Series.idxmin"),
        ("SeriesGroupBy.idxmax", "The behavior of Series.idxmax"),
        # Docstring divides by zero to show behavior difference
        ("missing.mask_zero_div_zero", "divide by zero encountered"),
        (
            "to_pydatetime",
            "The behavior of DatetimeProperties.to_pydatetime is deprecated",
        ),
        (
            "pandas.core.generic.NDFrame.bool",
            "(Series|DataFrame).bool is now deprecated and will be removed "
            "in future version of pandas",
        ),
        (
            "pandas.core.generic.NDFrame.first",
            "first is deprecated and will be removed in a future version. "
            "Please create a mask and filter using `.loc` instead",
        ),
        (
            "Resampler.fillna",
            "DatetimeIndexResampler.fillna is deprecated",
        ),
        (
            "DataFrameGroupBy.fillna",
            "DataFrameGroupBy.fillna with 'method' is deprecated",
        ),
        (
            "DataFrameGroupBy.fillna",
            "DataFrame.fillna with 'method' is deprecated",
        ),
        ("read_parquet", "Passing a BlockManager to DataFrame is deprecated"),
    ]

    if is_doctest:
        for item in items:
            for path, message in ignored_doctest_warnings:
                ignore_doctest_warning(item, path, message)


hypothesis_health_checks = [hypothesis.HealthCheck.too_slow]
if Version(hypothesis.__version__) >= Version("6.83.2"):
    hypothesis_health_checks.append(hypothesis.HealthCheck.differing_executors)

# Hypothesis
hypothesis.settings.register_profile(
    "ci",
    # Hypothesis timing checks are tuned for scalars by default, so we bump
    # them from 200ms to 500ms per test case as the global default. If this
    # is too short for a specific test, (a) try to make it faster, and (b)
    # if it really is slow add `@settings(deadline=...)` with a working value,
    # or `deadline=None` to entirely disable timeouts for that test.
    # 2022-02-09: Changed deadline from 500 -> None. Deadline leads to
    # non-actionable, flaky CI failures (# GH 24641, 44969, 45118, 44969)
    deadline=None,
    suppress_health_check=tuple(hypothesis_health_checks),
)
hypothesis.settings.load_profile("ci")

# Registering these strategies makes them globally available via st.from_type,
# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(
        cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())
    )

for name in "YearBegin YearEnd BYearBegin BYearEnd".split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(
        cls,
        st.builds(
            cls,
            n=st.integers(-5, 5),
            normalize=st.booleans(),
            month=st.integers(min_value=1, max_value=12),
        ),
    )

for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(
        cls,
        st.builds(
            cls,
            n=st.integers(-24, 24),
            normalize=st.booleans(),
            startingMonth=st.integers(min_value=1, max_value=12),
        ),
    )

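Note: once registered, the offset strategies are reachable through st.from_type. A minimal property-test sketch, assuming the registrations above have already run (as they do inside the pandas test suite); the test itself is illustrative, not part of conftest:

from hypothesis import given, strategies as st
import pandas as pd

@given(offset=st.from_type(pd.tseries.offsets.MonthEnd))
def test_monthend_applies(offset):
    # n and normalize are drawn from the builds() strategy registered above
    shifted = pd.Timestamp("2024-01-15") + offset
    assert isinstance(shifted, pd.Timestamp)
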
# ----------------------------------------------------------------
|
250 |
+
# Autouse fixtures
|
251 |
+
# ----------------------------------------------------------------
|
252 |
+
|
253 |
+
|
254 |
+
# https://github.com/pytest-dev/pytest/issues/11873
|
255 |
+
# Would like to avoid autouse=True, but cannot as of pytest 8.0.0
|
256 |
+
@pytest.fixture(autouse=True)
|
257 |
+
def add_doctest_imports(doctest_namespace) -> None:
|
258 |
+
"""
|
259 |
+
Make `np` and `pd` names available for doctests.
|
260 |
+
"""
|
261 |
+
doctest_namespace["np"] = np
|
262 |
+
doctest_namespace["pd"] = pd
|
263 |
+
|
264 |
+
|
265 |
+
@pytest.fixture(autouse=True)
|
266 |
+
def configure_tests() -> None:
|
267 |
+
"""
|
268 |
+
Configure settings for all tests and test modules.
|
269 |
+
"""
|
270 |
+
pd.set_option("chained_assignment", "raise")
|
271 |
+
|
272 |
+
|
273 |
+
# ----------------------------------------------------------------
# Common arguments
# ----------------------------------------------------------------
@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis={repr(x)}")
def axis(request):
    """
    Fixture for returning the axis numbers of a DataFrame.
    """
    return request.param


axis_frame = axis


@pytest.fixture(params=[1, "columns"], ids=lambda x: f"axis={repr(x)}")
def axis_1(request):
    """
    Fixture for returning aliases of axis 1 of a DataFrame.
    """
    return request.param


@pytest.fixture(params=[True, False, None])
def observed(request):
    """
    Pass in the observed keyword to groupby for [True, False].
    This indicates whether categoricals should return values for
    values which are not in the grouper [False / None], or only values which
    appear in the grouper [True]. [None] is supported for future compatibility
    if we decide to change the default (and would need to warn if this
    parameter is not passed).
    """
    return request.param


@pytest.fixture(params=[True, False, None])
def ordered(request):
    """
    Boolean 'ordered' parameter for Categorical.
    """
    return request.param


@pytest.fixture(params=[True, False])
def skipna(request):
    """
    Boolean 'skipna' parameter.
    """
    return request.param


@pytest.fixture(params=["first", "last", False])
def keep(request):
    """
    Valid values for the 'keep' parameter used in
    .duplicated or .drop_duplicates
    """
    return request.param


@pytest.fixture(params=["both", "neither", "left", "right"])
def inclusive_endpoints_fixture(request):
    """
    Fixture for trying all interval 'inclusive' parameters.
    """
    return request.param


@pytest.fixture(params=["left", "right", "both", "neither"])
def closed(request):
    """
    Fixture for trying all interval closed parameters.
    """
    return request.param


@pytest.fixture(params=["left", "right", "both", "neither"])
def other_closed(request):
    """
    Secondary closed fixture to allow parametrizing over all pairs of closed.
    """
    return request.param


@pytest.fixture(
    params=[
        None,
        "gzip",
        "bz2",
        "zip",
        "xz",
        "tar",
        pytest.param("zstd", marks=td.skip_if_no("zstandard")),
    ]
)
def compression(request):
    """
    Fixture for trying common compression types in compression tests.
    """
    return request.param


@pytest.fixture(
    params=[
        "gzip",
        "bz2",
        "zip",
        "xz",
        "tar",
        pytest.param("zstd", marks=td.skip_if_no("zstandard")),
    ]
)
def compression_only(request):
    """
    Fixture for trying common compression types in compression tests excluding
    uncompressed case.
    """
    return request.param


@pytest.fixture(params=[True, False])
def writable(request):
    """
    Fixture parametrizing over whether an array is writable.
    """
    return request.param


@pytest.fixture(params=["inner", "outer", "left", "right"])
def join_type(request):
    """
    Fixture for trying all types of join operations.
    """
    return request.param


@pytest.fixture(params=["nlargest", "nsmallest"])
def nselect_method(request):
    """
    Fixture for trying all nselect methods.
    """
    return request.param

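# Illustration, not part of the upstream file: naming a fixture parametrizes
# the test, so this hypothetical body runs once per codec, and the "zstd"
# case is skipped when zstandard is missing.
def test_csv_compression_roundtrip(compression, tmp_path):
    df = pd.DataFrame({"a": [1, 2, 3]})
    path = tmp_path / "data.csv"
    df.to_csv(path, compression=compression, index=False)
    tm.assert_frame_equal(pd.read_csv(path, compression=compression), df)
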
# ----------------------------------------------------------------
# Missing values & co.
# ----------------------------------------------------------------
@pytest.fixture(params=tm.NULL_OBJECTS, ids=lambda x: type(x).__name__)
def nulls_fixture(request):
    """
    Fixture for each null type in pandas.
    """
    return request.param


nulls_fixture2 = nulls_fixture  # Generate cartesian product of nulls_fixture


@pytest.fixture(params=[None, np.nan, pd.NaT])
def unique_nulls_fixture(request):
    """
    Fixture for each null type in pandas, each null type exactly once.
    """
    return request.param


# Generate cartesian product of unique_nulls_fixture:
unique_nulls_fixture2 = unique_nulls_fixture


@pytest.fixture(params=tm.NP_NAT_OBJECTS, ids=lambda x: type(x).__name__)
def np_nat_fixture(request):
    """
    Fixture for each NaT type in numpy.
    """
    return request.param


# Generate cartesian product of np_nat_fixture:
np_nat_fixture2 = np_nat_fixture

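# Illustration, not part of the upstream file: aliasing a fixture gives
# pytest two independent parametrizations, so a test naming both fixtures
# runs for every ordered pair of null sentinels (N * N cases).
def test_all_null_pairs_are_na(nulls_fixture, nulls_fixture2):
    assert pd.isna(nulls_fixture) and pd.isna(nulls_fixture2)
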
# ----------------------------------------------------------------
# Classes
# ----------------------------------------------------------------


@pytest.fixture(params=[DataFrame, Series])
def frame_or_series(request):
    """
    Fixture to parametrize over DataFrame and Series.
    """
    return request.param


@pytest.fixture(params=[Index, Series], ids=["index", "series"])
def index_or_series(request):
    """
    Fixture to parametrize over Index and Series, made necessary by a mypy
    bug, giving an error:

    List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"

    See GH#29725
    """
    return request.param


# Generate cartesian product of index_or_series fixture:
index_or_series2 = index_or_series


@pytest.fixture(params=[Index, Series, pd.array], ids=["index", "series", "array"])
def index_or_series_or_array(request):
    """
    Fixture to parametrize over Index, Series, and ExtensionArray.
    """
    return request.param


@pytest.fixture(params=[Index, Series, DataFrame, pd.array], ids=lambda x: x.__name__)
def box_with_array(request):
    """
    Fixture to test behavior for Index, Series, DataFrame, and pandas Array
    classes.
    """
    return request.param


box_with_array2 = box_with_array


@pytest.fixture
def dict_subclass() -> type[dict]:
    """
    Fixture for a dictionary subclass.
    """

    class TestSubDict(dict):
        def __init__(self, *args, **kwargs) -> None:
            dict.__init__(self, *args, **kwargs)

    return TestSubDict


@pytest.fixture
def non_dict_mapping_subclass() -> type[abc.Mapping]:
    """
    Fixture for a Mapping subclass that is not a dict subclass.
    """

    class TestNonDictMapping(abc.Mapping):
        def __init__(self, underlying_dict) -> None:
            self._data = underlying_dict

        def __getitem__(self, key):
            return self._data.__getitem__(key)

        def __iter__(self) -> Iterator:
            return self._data.__iter__()

        def __len__(self) -> int:
            return self._data.__len__()

    return TestNonDictMapping

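# Illustration, not part of the upstream file: each "box" is a callable that
# wraps an ndarray, so a single hypothetical test covers all four classes.
def test_box_preserves_length(box_with_array):
    assert len(box_with_array(np.arange(5))) == 5
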
# ----------------------------------------------------------------
# Indices
# ----------------------------------------------------------------
@pytest.fixture
def multiindex_year_month_day_dataframe_random_data():
    """
    DataFrame with 3 level MultiIndex (year, month, day) covering
    first 100 business days from 2000-01-01 with random data
    """
    tdf = DataFrame(
        np.random.default_rng(2).standard_normal((100, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=date_range("2000-01-01", periods=100, freq="B"),
    )
    ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
    # use int64 Index, to make sure things work
    ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
    ymd.index.set_names(["year", "month", "day"], inplace=True)
    return ymd


@pytest.fixture
def lexsorted_two_level_string_multiindex() -> MultiIndex:
    """
    2-level MultiIndex, lexsorted, with string names.
    """
    return MultiIndex(
        levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
        codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
        names=["first", "second"],
    )


@pytest.fixture
def multiindex_dataframe_random_data(
    lexsorted_two_level_string_multiindex,
) -> DataFrame:
    """DataFrame with 2 level MultiIndex with random data"""
    index = lexsorted_two_level_string_multiindex
    return DataFrame(
        np.random.default_rng(2).standard_normal((10, 3)),
        index=index,
        columns=Index(["A", "B", "C"], name="exp"),
    )


def _create_multiindex():
    """
    MultiIndex used to test the general functionality of this object
    """

    # See Also: tests.multi.conftest.idx
    major_axis = Index(["foo", "bar", "baz", "qux"])
    minor_axis = Index(["one", "two"])

    major_codes = np.array([0, 0, 1, 2, 3, 3])
    minor_codes = np.array([0, 1, 0, 1, 0, 1])
    index_names = ["first", "second"]
    return MultiIndex(
        levels=[major_axis, minor_axis],
        codes=[major_codes, minor_codes],
        names=index_names,
        verify_integrity=False,
    )


def _create_mi_with_dt64tz_level():
    """
    MultiIndex with a level that is a tzaware DatetimeIndex.
    """
    # GH#8367 round trip with pickle
    return MultiIndex.from_product(
        [[1, 2], ["a", "b"], date_range("20130101", periods=3, tz="US/Eastern")],
        names=["one", "two", "three"],
    )


indices_dict = {
    "string": Index([f"pandas_{i}" for i in range(100)]),
    "datetime": date_range("2020-01-01", periods=100),
    "datetime-tz": date_range("2020-01-01", periods=100, tz="US/Pacific"),
    "period": period_range("2020-01-01", periods=100, freq="D"),
    "timedelta": timedelta_range(start="1 day", periods=100, freq="D"),
    "range": RangeIndex(100),
    "int8": Index(np.arange(100), dtype="int8"),
    "int16": Index(np.arange(100), dtype="int16"),
    "int32": Index(np.arange(100), dtype="int32"),
    "int64": Index(np.arange(100), dtype="int64"),
    "uint8": Index(np.arange(100), dtype="uint8"),
    "uint16": Index(np.arange(100), dtype="uint16"),
    "uint32": Index(np.arange(100), dtype="uint32"),
    "uint64": Index(np.arange(100), dtype="uint64"),
    "float32": Index(np.arange(100), dtype="float32"),
    "float64": Index(np.arange(100), dtype="float64"),
    "bool-object": Index([True, False] * 5, dtype=object),
    "bool-dtype": Index([True, False] * 5, dtype=bool),
    "complex64": Index(
        np.arange(100, dtype="complex64") + 1.0j * np.arange(100, dtype="complex64")
    ),
    "complex128": Index(
        np.arange(100, dtype="complex128") + 1.0j * np.arange(100, dtype="complex128")
    ),
    "categorical": CategoricalIndex(list("abcd") * 25),
    "interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=101)),
    "empty": Index([]),
    "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
    "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(),
    "multi": _create_multiindex(),
    "repeats": Index([0, 0, 1, 1, 2, 2]),
    "nullable_int": Index(np.arange(100), dtype="Int64"),
    "nullable_uint": Index(np.arange(100), dtype="UInt16"),
    "nullable_float": Index(np.arange(100), dtype="Float32"),
    "nullable_bool": Index(np.arange(100).astype(bool), dtype="boolean"),
    "string-python": Index(
        pd.array([f"pandas_{i}" for i in range(100)], dtype="string[python]")
    ),
}
if has_pyarrow:
    idx = Index(pd.array([f"pandas_{i}" for i in range(100)], dtype="string[pyarrow]"))
    indices_dict["string-pyarrow"] = idx

@pytest.fixture(params=indices_dict.keys())
def index(request):
    """
    Fixture for many "simple" kinds of indices.

    These indices are unlikely to cover corner cases, e.g.
    - no names
    - no NaTs/NaNs
    - no values near implementation bounds
    - ...
    """
    # copy to avoid mutation, e.g. setting .name
    return indices_dict[request.param].copy()


# Needed to generate cartesian product of indices
index_fixture2 = index


@pytest.fixture(
    params=[
        key for key, value in indices_dict.items() if not isinstance(value, MultiIndex)
    ]
)
def index_flat(request):
    """
    index fixture, but excluding MultiIndex cases.
    """
    key = request.param
    return indices_dict[key].copy()


# Alias so we can test with cartesian product of index_flat
index_flat2 = index_flat


@pytest.fixture(
    params=[
        key
        for key, value in indices_dict.items()
        if not (
            key.startswith(("int", "uint", "float"))
            or key in ["range", "empty", "repeats", "bool-dtype"]
        )
        and not isinstance(value, MultiIndex)
    ]
)
def index_with_missing(request):
    """
    Fixture for indices with missing values.

    Integer-dtype and empty cases are excluded because they cannot hold missing
    values.

    MultiIndex is excluded because isna() is not defined for MultiIndex.
    """

    # GH 35538. Use deep copy to avoid elusive bug on np-dev
    # GHA pipeline that writes into indices_dict despite copy
    ind = indices_dict[request.param].copy(deep=True)
    vals = ind.values.copy()
    if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
        # For setting missing values in the top level of MultiIndex
        vals = ind.tolist()
        vals[0] = (None,) + vals[0][1:]
        vals[-1] = (None,) + vals[-1][1:]
        return MultiIndex.from_tuples(vals)
    else:
        vals[0] = None
        vals[-1] = None
        return type(ind)(vals)

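# Illustration, not part of the upstream file: because the fixtures return
# copies, a test can rename or otherwise mutate its index without leaking
# state into indices_dict for later tests.
def test_renaming_does_not_leak(index_flat):
    renamed = index_flat.rename("scratch")
    assert renamed.name == "scratch"
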
# ----------------------------------------------------------------
# Series'
# ----------------------------------------------------------------
@pytest.fixture
def string_series() -> Series:
    """
    Fixture for Series of floats with Index of unique strings
    """
    return Series(
        np.arange(30, dtype=np.float64) * 1.1,
        index=Index([f"i_{i}" for i in range(30)], dtype=object),
        name="series",
    )


@pytest.fixture
def object_series() -> Series:
    """
    Fixture for Series of dtype object with Index of unique strings
    """
    data = [f"foo_{i}" for i in range(30)]
    index = Index([f"bar_{i}" for i in range(30)], dtype=object)
    return Series(data, index=index, name="objects", dtype=object)


@pytest.fixture
def datetime_series() -> Series:
    """
    Fixture for Series of floats with DatetimeIndex
    """
    return Series(
        np.random.default_rng(2).standard_normal(30),
        index=date_range("2000-01-01", periods=30, freq="B"),
        name="ts",
    )


def _create_series(index):
    """Helper for the _series dict"""
    size = len(index)
    data = np.random.default_rng(2).standard_normal(size)
    return Series(data, index=index, name="a", copy=False)


_series = {
    f"series-with-{index_id}-index": _create_series(index)
    for index_id, index in indices_dict.items()
}


@pytest.fixture
def series_with_simple_index(index) -> Series:
    """
    Fixture for tests on series with changing types of indices.
    """
    return _create_series(index)


_narrow_series = {
    f"{dtype.__name__}-series": Series(
        range(30), index=[f"i-{i}" for i in range(30)], name="a", dtype=dtype
    )
    for dtype in tm.NARROW_NP_DTYPES
}


_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}


@pytest.fixture(params=_index_or_series_objs.keys())
def index_or_series_obj(request):
    """
    Fixture for tests on indexes, series and series with a narrow dtype.
    Copied (deep) to avoid mutation, e.g. setting .name.
    """
    return _index_or_series_objs[request.param].copy(deep=True)


_typ_objects_series = {
    f"{dtype.__name__}-series": Series(dtype) for dtype in tm.PYTHON_DATA_TYPES
}


_index_or_series_memory_objs = {
    **indices_dict,
    **_series,
    **_narrow_series,
    **_typ_objects_series,
}


@pytest.fixture(params=_index_or_series_memory_objs.keys())
def index_or_series_memory_obj(request):
    """
    Fixture for tests on indexes, series, series with a narrow dtype and
    series with empty objects type.
    Copied (deep) to avoid mutation, e.g. setting .name.
    """
    return _index_or_series_memory_objs[request.param].copy(deep=True)

# ----------------------------------------------------------------
# DataFrames
# ----------------------------------------------------------------
@pytest.fixture
def int_frame() -> DataFrame:
    """
    Fixture for DataFrame of ints with index of unique strings

    Columns are ['A', 'B', 'C', 'D'].
    """
    return DataFrame(
        np.ones((30, 4), dtype=np.int64),
        index=Index([f"foo_{i}" for i in range(30)], dtype=object),
        columns=Index(list("ABCD"), dtype=object),
    )


@pytest.fixture
def float_frame() -> DataFrame:
    """
    Fixture for DataFrame of floats with index of unique strings

    Columns are ['A', 'B', 'C', 'D'].
    """
    return DataFrame(
        np.random.default_rng(2).standard_normal((30, 4)),
        index=Index([f"foo_{i}" for i in range(30)]),
        columns=Index(list("ABCD")),
    )


@pytest.fixture
def rand_series_with_duplicate_datetimeindex() -> Series:
    """
    Fixture for Series with a DatetimeIndex that has duplicates.
    """
    dates = [
        datetime(2000, 1, 2),
        datetime(2000, 1, 2),
        datetime(2000, 1, 2),
        datetime(2000, 1, 3),
        datetime(2000, 1, 3),
        datetime(2000, 1, 3),
        datetime(2000, 1, 4),
        datetime(2000, 1, 4),
        datetime(2000, 1, 4),
        datetime(2000, 1, 5),
    ]

    return Series(np.random.default_rng(2).standard_normal(len(dates)), index=dates)

# ----------------------------------------------------------------
# Scalars
# ----------------------------------------------------------------
@pytest.fixture(
    params=[
        (Interval(left=0, right=5), IntervalDtype("int64", "right")),
        (Interval(left=0.1, right=0.5), IntervalDtype("float64", "right")),
        (Period("2012-01", freq="M"), "period[M]"),
        (Period("2012-02-01", freq="D"), "period[D]"),
        (
            Timestamp("2011-01-01", tz="US/Eastern"),
            DatetimeTZDtype(unit="s", tz="US/Eastern"),
        ),
        (Timedelta(seconds=500), "timedelta64[ns]"),
    ]
)
def ea_scalar_and_dtype(request):
    """
    Fixture pairing an extension-array scalar with its expected dtype.
    """
    return request.param

# ----------------------------------------------------------------
# Operators & Operations
# ----------------------------------------------------------------


@pytest.fixture(params=tm.arithmetic_dunder_methods)
def all_arithmetic_operators(request):
    """
    Fixture for dunder names for common arithmetic operations.
    """
    return request.param


@pytest.fixture(
    params=[
        operator.add,
        ops.radd,
        operator.sub,
        ops.rsub,
        operator.mul,
        ops.rmul,
        operator.truediv,
        ops.rtruediv,
        operator.floordiv,
        ops.rfloordiv,
        operator.mod,
        ops.rmod,
        operator.pow,
        ops.rpow,
        operator.eq,
        operator.ne,
        operator.lt,
        operator.le,
        operator.gt,
        operator.ge,
        operator.and_,
        ops.rand_,
        operator.xor,
        ops.rxor,
        operator.or_,
        ops.ror_,
    ]
)
def all_binary_operators(request):
    """
    Fixture for operator and roperator arithmetic, comparison, and logical ops.
    """
    return request.param


@pytest.fixture(
    params=[
        operator.add,
        ops.radd,
        operator.sub,
        ops.rsub,
        operator.mul,
        ops.rmul,
        operator.truediv,
        ops.rtruediv,
        operator.floordiv,
        ops.rfloordiv,
        operator.mod,
        ops.rmod,
        operator.pow,
        ops.rpow,
    ]
)
def all_arithmetic_functions(request):
    """
    Fixture for operator and roperator arithmetic functions.

    Notes
    -----
    Unlike ``all_arithmetic_operators``, this yields the operator functions
    themselves rather than dunder-method names; ``divmod``/``rdivmod`` are
    not included.
    """
    return request.param


_all_numeric_reductions = [
    "count",
    "sum",
    "max",
    "min",
    "mean",
    "prod",
    "std",
    "var",
    "median",
    "kurt",
    "skew",
    "sem",
]


@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
    """
    Fixture for numeric reduction names.
    """
    return request.param


_all_boolean_reductions = ["all", "any"]


@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
    """
    Fixture for boolean reduction names.
    """
    return request.param


_all_reductions = _all_numeric_reductions + _all_boolean_reductions


@pytest.fixture(params=_all_reductions)
def all_reductions(request):
    """
    Fixture for all (boolean + numeric) reduction names.
    """
    return request.param


@pytest.fixture(
    params=[
        operator.eq,
        operator.ne,
        operator.gt,
        operator.ge,
        operator.lt,
        operator.le,
    ]
)
def comparison_op(request):
    """
    Fixture for operator module comparison functions.
    """
    return request.param


@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"])
def compare_operators_no_eq_ne(request):
    """
    Fixture for dunder names for compare operations except == and !=

    * >=
    * >
    * <
    * <=
    """
    return request.param


@pytest.fixture(
    params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"]
)
def all_logical_operators(request):
    """
    Fixture for dunder names for common logical operations

    * |
    * &
    * ^
    """
    return request.param


_all_numeric_accumulations = ["cumsum", "cumprod", "cummin", "cummax"]


@pytest.fixture(params=_all_numeric_accumulations)
def all_numeric_accumulations(request):
    """
    Fixture for numeric accumulation names.
    """
    return request.param

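# Illustration, not part of the upstream file: these fixtures hand the test
# a callable, so one hypothetical body exercises arithmetic, comparison and
# bitwise ops plus their reflected variants.
def test_binary_op_preserves_index(all_binary_operators):
    left = pd.Series([1, 2, 3])
    right = pd.Series([1, 1, 2])
    assert all_binary_operators(left, right).index.equals(left.index)
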
# ----------------------------------------------------------------
# Data sets/files
# ----------------------------------------------------------------
@pytest.fixture
def strict_data_files(pytestconfig):
    """
    Returns the configuration for the test setting `--no-strict-data-files`.
    """
    return pytestconfig.getoption("--no-strict-data-files")


@pytest.fixture
def datapath(strict_data_files: bool) -> Callable[..., str]:
    """
    Get the path to a data file.

    Parameters
    ----------
    path : str
        Path to the file, relative to ``pandas/tests/``

    Returns
    -------
    path including ``pandas/tests``.

    Raises
    ------
    ValueError
        If the path doesn't exist and the --no-strict-data-files option is not set.
    """
    BASE_PATH = os.path.join(os.path.dirname(__file__), "tests")

    def deco(*args):
        path = os.path.join(BASE_PATH, *args)
        if not os.path.exists(path):
            if strict_data_files:
                raise ValueError(
                    f"Could not find file {path} and --no-strict-data-files is not set."
                )
            pytest.skip(f"Could not find {path}.")
        return path

    return deco

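# Illustration, not part of the upstream file: datapath joins its arguments
# onto pandas/tests and skips (or raises, in strict mode) when the file is
# absent. The iris.csv path is assumed to exist in the test data tree.
def test_datapath_points_into_tests(datapath):
    path = datapath("io", "data", "csv", "iris.csv")
    assert path.endswith("iris.csv")
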
# ----------------------------------------------------------------
# Time zones
# ----------------------------------------------------------------
TIMEZONES = [
    None,
    "UTC",
    "US/Eastern",
    "Asia/Tokyo",
    "dateutil/US/Pacific",
    "dateutil/Asia/Singapore",
    "+01:15",
    "-02:15",
    "UTC+01:15",
    "UTC-02:15",
    tzutc(),
    tzlocal(),
    FixedOffset(300),
    FixedOffset(0),
    FixedOffset(-300),
    timezone.utc,
    timezone(timedelta(hours=1)),
    timezone(timedelta(hours=-1), name="foo"),
]
if zoneinfo is not None:
    TIMEZONES.extend(
        [
            zoneinfo.ZoneInfo("US/Pacific"),  # type: ignore[list-item]
            zoneinfo.ZoneInfo("UTC"),  # type: ignore[list-item]
        ]
    )
TIMEZONE_IDS = [repr(i) for i in TIMEZONES]


@td.parametrize_fixture_doc(str(TIMEZONE_IDS))
@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)
def tz_naive_fixture(request):
    """
    Fixture for trying timezones including default (None): {0}
    """
    return request.param


@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))
@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])
def tz_aware_fixture(request):
    """
    Fixture for trying explicit timezones: {0}
    """
    return request.param


# Generate cartesian product of tz_aware_fixture:
tz_aware_fixture2 = tz_aware_fixture


_UTCS = ["utc", "dateutil/UTC", utc, tzutc(), timezone.utc]
if zoneinfo is not None:
    _UTCS.append(zoneinfo.ZoneInfo("UTC"))


@pytest.fixture(params=_UTCS)
def utc_fixture(request):
    """
    Fixture to provide variants of UTC timezone strings and tzinfo objects.
    """
    return request.param


utc_fixture2 = utc_fixture


@pytest.fixture(params=["s", "ms", "us", "ns"])
def unit(request):
    """
    datetime64 units we support.
    """
    return request.param


unit2 = unit

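# Illustration, not part of the upstream file: every timezone flavor above
# (pytz/dateutil strings, fixed offsets, stdlib timezone, zoneinfo objects)
# flows through a single hypothetical test body.
def test_localize_is_aware(tz_aware_fixture):
    ts = Timestamp("2021-01-01").tz_localize(tz_aware_fixture)
    assert ts.tzinfo is not None
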
# ----------------------------------------------------------------
# Dtypes
# ----------------------------------------------------------------
@pytest.fixture(params=tm.STRING_DTYPES)
def string_dtype(request):
    """
    Parametrized fixture for string dtypes.

    * str
    * 'str'
    * 'U'
    """
    return request.param


@pytest.fixture(
    params=[
        "string[python]",
        pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
    ]
)
def nullable_string_dtype(request):
    """
    Parametrized fixture for nullable string dtypes.

    * 'string[python]'
    * 'string[pyarrow]'
    """
    return request.param


@pytest.fixture(
    params=[
        "python",
        pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")),
        pytest.param("pyarrow_numpy", marks=td.skip_if_no("pyarrow")),
    ]
)
def string_storage(request):
    """
    Parametrized fixture for pd.options.mode.string_storage.

    * 'python'
    * 'pyarrow'
    * 'pyarrow_numpy'
    """
    return request.param


@pytest.fixture(
    params=[
        "numpy_nullable",
        pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")),
    ]
)
def dtype_backend(request):
    """
    Parametrized fixture for dtype backends.

    * 'numpy_nullable'
    * 'pyarrow'
    """
    return request.param


# Alias so we can test with cartesian product of string_storage
string_storage2 = string_storage


@pytest.fixture(params=tm.BYTES_DTYPES)
def bytes_dtype(request):
    """
    Parametrized fixture for bytes dtypes.

    * bytes
    * 'bytes'
    """
    return request.param


@pytest.fixture(params=tm.OBJECT_DTYPES)
def object_dtype(request):
    """
    Parametrized fixture for object dtypes.

    * object
    * 'object'
    """
    return request.param


@pytest.fixture(
    params=[
        "object",
        "string[python]",
        pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
        pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")),
    ]
)
def any_string_dtype(request):
    """
    Parametrized fixture for string dtypes.

    * 'object'
    * 'string[python]'
    * 'string[pyarrow]'
    * 'string[pyarrow_numpy]'
    """
    return request.param


@pytest.fixture(params=tm.DATETIME64_DTYPES)
def datetime64_dtype(request):
    """
    Parametrized fixture for datetime64 dtypes.

    * 'datetime64[ns]'
    * 'M8[ns]'
    """
    return request.param


@pytest.fixture(params=tm.TIMEDELTA64_DTYPES)
def timedelta64_dtype(request):
    """
    Parametrized fixture for timedelta64 dtypes.

    * 'timedelta64[ns]'
    * 'm8[ns]'
    """
    return request.param


@pytest.fixture
def fixed_now_ts() -> Timestamp:
    """
    Fixture emits fixed Timestamp.now()
    """
    return Timestamp(  # pyright: ignore[reportGeneralTypeIssues]
        year=2021, month=1, day=1, hour=12, minute=4, second=13, microsecond=22
    )


@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES)
def float_numpy_dtype(request):
    """
    Parameterized fixture for float dtypes.

    * float
    * 'float32'
    * 'float64'
    """
    return request.param


@pytest.fixture(params=tm.FLOAT_EA_DTYPES)
def float_ea_dtype(request):
    """
    Parameterized fixture for nullable float dtypes.

    * 'Float32'
    * 'Float64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_FLOAT_DTYPES)
def any_float_dtype(request):
    """
    Parameterized fixture for float dtypes.

    * float
    * 'float32'
    * 'float64'
    * 'Float32'
    * 'Float64'
    """
    return request.param


@pytest.fixture(params=tm.COMPLEX_DTYPES)
def complex_dtype(request):
    """
    Parameterized fixture for complex dtypes.

    * complex
    * 'complex64'
    * 'complex128'
    """
    return request.param


@pytest.fixture(params=tm.SIGNED_INT_NUMPY_DTYPES)
def any_signed_int_numpy_dtype(request):
    """
    Parameterized fixture for signed integer dtypes.

    * int
    * 'int8'
    * 'int16'
    * 'int32'
    * 'int64'
    """
    return request.param


@pytest.fixture(params=tm.UNSIGNED_INT_NUMPY_DTYPES)
def any_unsigned_int_numpy_dtype(request):
    """
    Parameterized fixture for unsigned integer dtypes.

    * 'uint8'
    * 'uint16'
    * 'uint32'
    * 'uint64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_INT_NUMPY_DTYPES)
def any_int_numpy_dtype(request):
    """
    Parameterized fixture for any integer dtype.

    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_INT_EA_DTYPES)
def any_int_ea_dtype(request):
    """
    Parameterized fixture for any nullable integer dtype.

    * 'UInt8'
    * 'Int8'
    * 'UInt16'
    * 'Int16'
    * 'UInt32'
    * 'Int32'
    * 'UInt64'
    * 'Int64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_INT_DTYPES)
def any_int_dtype(request):
    """
    Parameterized fixture for any integer dtype, numpy or nullable.

    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    * 'UInt8'
    * 'Int8'
    * 'UInt16'
    * 'Int16'
    * 'UInt32'
    * 'Int32'
    * 'UInt64'
    * 'Int64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES)
def any_numeric_ea_dtype(request):
    """
    Parameterized fixture for any nullable integer dtype and
    any float ea dtypes.

    * 'UInt8'
    * 'Int8'
    * 'UInt16'
    * 'Int16'
    * 'UInt32'
    * 'Int32'
    * 'UInt64'
    * 'Int64'
    * 'Float32'
    * 'Float64'
    """
    return request.param

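# Illustration, not part of the upstream file: the dtype fixtures provide
# dtype *names*, so the same construction works across numpy and nullable
# (extension-array) dtypes.
def test_nullable_numeric_holds_na(any_numeric_ea_dtype):
    ser = Series([1, 2, None], dtype=any_numeric_ea_dtype)
    assert ser.isna().sum() == 1
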
# Unsupported operand types for + ("List[Union[str, ExtensionDtype, dtype[Any],
# Type[object]]]" and "List[str]")
@pytest.fixture(
    params=tm.ALL_INT_EA_DTYPES
    + tm.FLOAT_EA_DTYPES
    + tm.ALL_INT_PYARROW_DTYPES_STR_REPR
    + tm.FLOAT_PYARROW_DTYPES_STR_REPR  # type: ignore[operator]
)
def any_numeric_ea_and_arrow_dtype(request):
    """
    Parameterized fixture for any nullable integer or float dtype, including
    the pyarrow-backed variants.

    * 'UInt8'
    * 'Int8'
    * 'UInt16'
    * 'Int16'
    * 'UInt32'
    * 'Int32'
    * 'UInt64'
    * 'Int64'
    * 'Float32'
    * 'Float64'
    * 'uint8[pyarrow]'
    * 'int8[pyarrow]'
    * 'uint16[pyarrow]'
    * 'int16[pyarrow]'
    * 'uint32[pyarrow]'
    * 'int32[pyarrow]'
    * 'uint64[pyarrow]'
    * 'int64[pyarrow]'
    * 'float32[pyarrow]'
    * 'float64[pyarrow]'
    """
    return request.param


@pytest.fixture(params=tm.SIGNED_INT_EA_DTYPES)
def any_signed_int_ea_dtype(request):
    """
    Parameterized fixture for any signed nullable integer dtype.

    * 'Int8'
    * 'Int16'
    * 'Int32'
    * 'Int64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_REAL_NUMPY_DTYPES)
def any_real_numpy_dtype(request):
    """
    Parameterized fixture for any (purely) real numeric dtype.

    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    * float
    * 'float32'
    * 'float64'
    """
    return request.param


@pytest.fixture(params=tm.ALL_REAL_DTYPES)
def any_real_numeric_dtype(request):
    """
    Parameterized fixture for any (purely) real numeric dtype.

    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    * float
    * 'float32'
    * 'float64'

    and associated ea dtypes.
    """
    return request.param


@pytest.fixture(params=tm.ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
    """
    Parameterized fixture for all numpy dtypes.

    * bool
    * 'bool'
    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    * float
    * 'float32'
    * 'float64'
    * complex
    * 'complex64'
    * 'complex128'
    * str
    * 'str'
    * 'U'
    * bytes
    * 'bytes'
    * 'datetime64[ns]'
    * 'M8[ns]'
    * 'timedelta64[ns]'
    * 'm8[ns]'
    * object
    * 'object'
    """
    return request.param


@pytest.fixture(params=tm.ALL_REAL_NULLABLE_DTYPES)
def any_real_nullable_dtype(request):
    """
    Parameterized fixture for all real dtypes that can hold NA.

    * float
    * 'float32'
    * 'float64'
    * 'Float32'
    * 'Float64'
    * 'UInt8'
    * 'UInt16'
    * 'UInt32'
    * 'UInt64'
    * 'Int8'
    * 'Int16'
    * 'Int32'
    * 'Int64'
    * 'uint8[pyarrow]'
    * 'uint16[pyarrow]'
    * 'uint32[pyarrow]'
    * 'uint64[pyarrow]'
    * 'int8[pyarrow]'
    * 'int16[pyarrow]'
    * 'int32[pyarrow]'
    * 'int64[pyarrow]'
    * 'float[pyarrow]'
    * 'double[pyarrow]'
    """
    return request.param


@pytest.fixture(params=tm.ALL_NUMERIC_DTYPES)
def any_numeric_dtype(request):
    """
    Parameterized fixture for all numeric dtypes.

    * int
    * 'int8'
    * 'uint8'
    * 'int16'
    * 'uint16'
    * 'int32'
    * 'uint32'
    * 'int64'
    * 'uint64'
    * float
    * 'float32'
    * 'float64'
    * complex
    * 'complex64'
    * 'complex128'
    * 'UInt8'
    * 'Int8'
    * 'UInt16'
    * 'Int16'
    * 'UInt32'
    * 'Int32'
    * 'UInt64'
    * 'Int64'
    * 'Float32'
    * 'Float64'
    """
    return request.param


# categoricals are handled separately
_any_skipna_inferred_dtype = [
    ("string", ["a", np.nan, "c"]),
    ("string", ["a", pd.NA, "c"]),
    ("mixed", ["a", pd.NaT, "c"]),  # pd.NaT not considered valid by is_string_array
    ("bytes", [b"a", np.nan, b"c"]),
    ("empty", [np.nan, np.nan, np.nan]),
    ("empty", []),
    ("mixed-integer", ["a", np.nan, 2]),
    ("mixed", ["a", np.nan, 2.0]),
    ("floating", [1.0, np.nan, 2.0]),
    ("integer", [1, np.nan, 2]),
    ("mixed-integer-float", [1, np.nan, 2.0]),
    ("decimal", [Decimal(1), np.nan, Decimal(2)]),
    ("boolean", [True, np.nan, False]),
    ("boolean", [True, pd.NA, False]),
    ("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]),
    ("datetime", [Timestamp("20130101"), np.nan, Timestamp("20180101")]),
    ("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),
    ("complex", [1 + 1j, np.nan, 2 + 2j]),
    # The following dtype is commented out due to GH 23554
    # ('timedelta64', [np.timedelta64(1, 'D'),
    #                  np.nan, np.timedelta64(2, 'D')]),
    ("timedelta", [timedelta(1), np.nan, timedelta(2)]),
    ("time", [time(1), np.nan, time(2)]),
    ("period", [Period(2013), pd.NaT, Period(2018)]),
    ("interval", [Interval(0, 1), np.nan, Interval(0, 2)]),
]
ids, _ = zip(*_any_skipna_inferred_dtype)  # use inferred type as fixture-id


@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)
def any_skipna_inferred_dtype(request):
    """
    Fixture for all inferred dtypes from _libs.lib.infer_dtype

    The covered (inferred) types are:
    * 'string'
    * 'empty'
    * 'bytes'
    * 'mixed'
    * 'mixed-integer'
    * 'mixed-integer-float'
    * 'floating'
    * 'integer'
    * 'decimal'
    * 'boolean'
    * 'datetime64'
    * 'datetime'
    * 'date'
    * 'timedelta'
    * 'time'
    * 'period'
    * 'interval'

    Returns
    -------
    inferred_dtype : str
        The string for the inferred dtype from _libs.lib.infer_dtype
    values : np.ndarray
        An array of object dtype that will be inferred to have
        `inferred_dtype`

    Examples
    --------
    >>> from pandas._libs import lib
    >>>
    >>> def test_something(any_skipna_inferred_dtype):
    ...     inferred_dtype, values = any_skipna_inferred_dtype
    ...     # will pass
    ...     assert lib.infer_dtype(values, skipna=True) == inferred_dtype
    """
    inferred_dtype, values = request.param
    values = np.array(values, dtype=object)  # object dtype to avoid casting

    # correctness of inference tested in tests/dtypes/test_inference.py
    return inferred_dtype, values

# ----------------------------------------------------------------
# Misc
# ----------------------------------------------------------------
@pytest.fixture
def ip():
    """
    Get an instance of IPython.InteractiveShell.

    Will raise a skip if IPython is not installed.
    """
    pytest.importorskip("IPython", minversion="6.0.0")
    from IPython.core.interactiveshell import InteractiveShell

    # GH#35711 make sure sqlite history file handle is not leaked
    from traitlets.config import Config  # isort:skip

    c = Config()
    c.HistoryManager.hist_file = ":memory:"

    return InteractiveShell(config=c)


@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def spmatrix(request):
    """
    Yields scipy sparse matrix classes.
    """
    sparse = pytest.importorskip("scipy.sparse")

    return getattr(sparse, request.param + "_matrix")


@pytest.fixture(
    params=[
        getattr(pd.offsets, o)
        for o in pd.offsets.__all__
        if issubclass(getattr(pd.offsets, o), pd.offsets.Tick) and o != "Tick"
    ]
)
def tick_classes(request):
    """
    Fixture for Tick based datetime offsets available for a time series.
    """
    return request.param


@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
    """
    Simple fixture for testing keys in sorting methods.
    Tests None (no key) and the identity key.
    """
    return request.param


@pytest.fixture(
    params=[
        ("foo", None, None),
        ("Egon", "Venkman", None),
        ("NCC1701D", "NCC1701D", "NCC1701D"),
        # possibly-matching NAs
        (np.nan, np.nan, np.nan),
        (np.nan, pd.NaT, None),
        (np.nan, pd.NA, None),
        (pd.NA, pd.NA, pd.NA),
    ]
)
def names(request) -> tuple[Hashable, Hashable, Hashable]:
    """
    A 3-tuple of names, the first two for operands, the last for a result.
    """
    return request.param


@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc])
def indexer_sli(request):
    """
    Parametrize over __setitem__, loc.__setitem__, iloc.__setitem__
    """
    return request.param


@pytest.fixture(params=[tm.loc, tm.iloc])
def indexer_li(request):
    """
    Parametrize over loc.__getitem__, iloc.__getitem__
    """
    return request.param


@pytest.fixture(params=[tm.setitem, tm.iloc])
def indexer_si(request):
    """
    Parametrize over __setitem__, iloc.__setitem__
    """
    return request.param


@pytest.fixture(params=[tm.setitem, tm.loc])
def indexer_sl(request):
    """
    Parametrize over __setitem__, loc.__setitem__
    """
    return request.param


@pytest.fixture(params=[tm.at, tm.loc])
def indexer_al(request):
    """
    Parametrize over at.__setitem__, loc.__setitem__
    """
    return request.param


@pytest.fixture(params=[tm.iat, tm.iloc])
def indexer_ial(request):
    """
    Parametrize over iat.__setitem__, iloc.__setitem__
    """
    return request.param

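# Illustration, not part of the upstream file: tm.setitem/tm.loc/tm.iloc are
# small helpers in pandas._testing that return the relevant indexer, so one
# hypothetical body covers obj[...] = v, obj.loc[...] = v and obj.iloc[...] = v.
def test_setitem_variants(indexer_sli):
    ser = Series([1, 2, 3])
    indexer_sli(ser)[0] = 10
    assert ser[0] == 10
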
@pytest.fixture
def using_array_manager() -> bool:
    """
    Fixture to check if the array manager is being used.
    """
    return _get_option("mode.data_manager", silent=True) == "array"


@pytest.fixture
def using_copy_on_write() -> bool:
    """
    Fixture to check if Copy-on-Write is enabled.
    """
    return (
        pd.options.mode.copy_on_write is True
        and _get_option("mode.data_manager", silent=True) == "block"
    )


@pytest.fixture
def warn_copy_on_write() -> bool:
    """
    Fixture to check if Copy-on-Write is in warning mode.
    """
    return (
        pd.options.mode.copy_on_write == "warn"
        and _get_option("mode.data_manager", silent=True) == "block"
    )


@pytest.fixture
def using_infer_string() -> bool:
    """
    Fixture to check if the infer-string option is enabled.
    """
    return pd.options.future.infer_string is True


warsaws = ["Europe/Warsaw", "dateutil/Europe/Warsaw"]
if zoneinfo is not None:
    warsaws.append(zoneinfo.ZoneInfo("Europe/Warsaw"))  # type: ignore[arg-type]


@pytest.fixture(params=warsaws)
def warsaw(request) -> str:
    """
    tzinfo for Europe/Warsaw using pytz, dateutil, or zoneinfo.
    """
    return request.param


@pytest.fixture()
def arrow_string_storage():
    """
    Storage names for which strings are backed by pyarrow.
    """
    return ("pyarrow", "pyarrow_numpy")
llmeval-env/lib/python3.10/site-packages/pandas/core/__init__.py
ADDED
File without changes

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/apply.cpython-310.pyc
ADDED
Binary file (49.8 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/arraylike.cpython-310.pyc
ADDED
Binary file (14.5 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/config_init.cpython-310.pyc
ADDED
Binary file (20.7 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/missing.cpython-310.pyc
ADDED
Binary file (26.6 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/nanops.cpython-310.pyc
ADDED
Binary file (37 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython-310.pyc
ADDED
Binary file (176 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/accessor.py
ADDED
@@ -0,0 +1,340 @@
"""

accessor.py contains base classes for implementing accessor properties
that can be mixed into or pinned onto other pandas classes.

"""
from __future__ import annotations

from typing import (
    Callable,
    final,
)
import warnings

from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level


class DirNamesMixin:
    _accessors: set[str] = set()
    _hidden_attrs: frozenset[str] = frozenset()

    @final
    def _dir_deletions(self) -> set[str]:
        """
        Delete unwanted __dir__ for this object.
        """
        return self._accessors | self._hidden_attrs

    def _dir_additions(self) -> set[str]:
        """
        Add additional __dir__ for this object.
        """
        return {accessor for accessor in self._accessors if hasattr(self, accessor)}

    def __dir__(self) -> list[str]:
        """
        Provide method name lookup and completion.

        Notes
        -----
        Only provide 'public' methods.
        """
        rv = set(super().__dir__())
        rv = (rv - self._dir_deletions()) | self._dir_additions()
        return sorted(rv)

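# Illustration with a toy class, not part of the upstream file: names listed
# in _accessors stay out of dir() until the attribute actually exists.
class _Demo(DirNamesMixin):
    _accessors = {"str"}


_d = _Demo()
assert "str" not in dir(_d)  # accessor never instantiated, so not advertised
_d.str = object()
assert "str" in dir(_d)  # now offered for tab completion
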
class PandasDelegate:
    """
    Abstract base class for delegating methods/properties.
    """

    def _delegate_property_get(self, name: str, *args, **kwargs):
        raise TypeError(f"You cannot access the property {name}")

    def _delegate_property_set(self, name: str, value, *args, **kwargs):
        raise TypeError(f"The property {name} cannot be set")

    def _delegate_method(self, name: str, *args, **kwargs):
        raise TypeError(f"You cannot call method {name}")

    @classmethod
    def _add_delegate_accessors(
        cls,
        delegate,
        accessors: list[str],
        typ: str,
        overwrite: bool = False,
        accessor_mapping: Callable[[str], str] = lambda x: x,
        raise_on_missing: bool = True,
    ) -> None:
        """
        Add accessors to cls from the delegate class.

        Parameters
        ----------
        cls
            Class to add the methods/properties to.
        delegate
            Class to get methods/properties and doc-strings.
        accessors : list of str
            List of accessors to add.
        typ : {'property', 'method'}
        overwrite : bool, default False
            Overwrite the method/property in the target class if it exists.
        accessor_mapping : Callable, default lambda x: x
            Callable to map the delegate's function to the cls' function.
        raise_on_missing : bool, default True
            Raise if an accessor does not exist on delegate.
            False skips the missing accessor.
        """

        def _create_delegator_property(name: str):
            def _getter(self):
                return self._delegate_property_get(name)

            def _setter(self, new_values):
                return self._delegate_property_set(name, new_values)

            _getter.__name__ = name
            _setter.__name__ = name

            return property(
                fget=_getter,
                fset=_setter,
                doc=getattr(delegate, accessor_mapping(name)).__doc__,
            )

        def _create_delegator_method(name: str):
            def f(self, *args, **kwargs):
                return self._delegate_method(name, *args, **kwargs)

            f.__name__ = name
            f.__doc__ = getattr(delegate, accessor_mapping(name)).__doc__

            return f

        for name in accessors:
            if (
                not raise_on_missing
                and getattr(delegate, accessor_mapping(name), None) is None
            ):
                continue

            if typ == "property":
                f = _create_delegator_property(name)
            else:
                f = _create_delegator_method(name)

            # don't overwrite existing methods/properties
            if overwrite or not hasattr(cls, name):
                setattr(cls, name, f)


def delegate_names(
    delegate,
    accessors: list[str],
    typ: str,
    overwrite: bool = False,
    accessor_mapping: Callable[[str], str] = lambda x: x,
    raise_on_missing: bool = True,
):
    """
    Add delegated names to a class using a class decorator. This provides
    an alternative usage to directly calling `_add_delegate_accessors`
    below a class definition.

    Parameters
    ----------
    delegate : object
        The class to get methods/properties & doc-strings.
    accessors : Sequence[str]
        List of accessors to add.
    typ : {'property', 'method'}
    overwrite : bool, default False
        Overwrite the method/property in the target class if it exists.
    accessor_mapping : Callable, default lambda x: x
        Callable to map the delegate's function to the cls' function.
    raise_on_missing : bool, default True
        Raise if an accessor does not exist on delegate.
        False skips the missing accessor.

    Returns
    -------
    callable
        A class decorator.

    Examples
    --------
    @delegate_names(Categorical, ["categories", "ordered"], "property")
    class CategoricalAccessor(PandasDelegate):
        [...]
    """

    def add_delegate_accessors(cls):
        cls._add_delegate_accessors(
            delegate,
            accessors,
            typ,
            overwrite=overwrite,
            accessor_mapping=accessor_mapping,
            raise_on_missing=raise_on_missing,
        )
        return cls

    return add_delegate_accessors

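# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# delegate_names wires pass-through attributes onto a PandasDelegate
# subclass; only _delegate_property_get needs overriding for a read-only
# property. Everything below (Engine, Wrapper) is hypothetical.
def _demo_delegate_names():
    class Engine:
        @property
        def speed(self):
            """Current speed."""
            return 42

    @delegate_names(Engine, ["speed"], typ="property")
    class Wrapper(PandasDelegate):
        def __init__(self, engine) -> None:
            self._engine = engine

        def _delegate_property_get(self, name: str, *args, **kwargs):
            return getattr(self._engine, name)

    # `speed` resolves through the generated property -> _delegate_property_get
    assert Wrapper(Engine()).speed == 42
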
# Ported with modifications from xarray; licence at LICENSES/XARRAY_LICENSE
# https://github.com/pydata/xarray/blob/master/xarray/core/extensions.py
# 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors
# 2. We use a UserWarning instead of a custom Warning


class CachedAccessor:
    """
    Custom property-like object.

    A descriptor for caching accessors.

    Parameters
    ----------
    name : str
        Namespace that will be accessed under, e.g. ``df.foo``.
    accessor : cls
        Class with the extension methods.

    Notes
    -----
    The accessor's class ``__init__`` method should assume that one of
    ``Series``, ``DataFrame`` or ``Index`` is passed as the single
    argument ``data``.
    """

    def __init__(self, name: str, accessor) -> None:
        self._name = name
        self._accessor = accessor

    def __get__(self, obj, cls):
        if obj is None:
            # we're accessing the attribute of the class, i.e., Dataset.geo
            return self._accessor
        accessor_obj = self._accessor(obj)
        # Replace the property with the accessor object. Inspired by:
        # https://www.pydanny.com/cached-property.html
        # We need to use object.__setattr__ because we overwrite __setattr__ on
        # NDFrame
        object.__setattr__(obj, self._name, accessor_obj)
        return accessor_obj

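# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# CachedAccessor constructs the accessor on first attribute access and then
# pins the instance onto the object, so later accesses never re-enter
# __get__. The names below are hypothetical.
def _demo_cached_accessor():
    class MyAccessor:
        def __init__(self, data) -> None:
            self.data = data

    class Owner:
        thing = CachedAccessor("thing", MyAccessor)

    obj = Owner()
    first = obj.thing  # built via CachedAccessor.__get__, then cached
    assert obj.thing is first  # second access hits the cached instance
    assert Owner.thing is MyAccessor  # class access returns the accessor class
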
@doc(klass="", others="")
def _register_accessor(name: str, cls):
    """
    Register a custom accessor on {klass} objects.

    Parameters
    ----------
    name : str
        Name under which the accessor should be registered. A warning is issued
        if this name conflicts with a preexisting attribute.

    Returns
    -------
    callable
        A class decorator.

    See Also
    --------
    register_dataframe_accessor : Register a custom accessor on DataFrame objects.
    register_series_accessor : Register a custom accessor on Series objects.
    register_index_accessor : Register a custom accessor on Index objects.

    Notes
    -----
    When accessed, your accessor will be initialized with the pandas object
    the user is interacting with. So the signature must be

    .. code-block:: python

        def __init__(self, pandas_object):  # noqa: E999
            ...

    For consistency with pandas methods, you should raise an ``AttributeError``
    if the data passed to your accessor has an incorrect dtype.

    >>> pd.Series(['a', 'b']).dt
    Traceback (most recent call last):
    ...
    AttributeError: Can only use .dt accessor with datetimelike values

    Examples
    --------
    In your library code::

        import pandas as pd

        @pd.api.extensions.register_dataframe_accessor("geo")
        class GeoAccessor:
            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @property
            def center(self):
                # return the geographic center point of this DataFrame
                lat = self._obj.latitude
                lon = self._obj.longitude
                return (float(lon.mean()), float(lat.mean()))

            def plot(self):
                # plot this array's data on a map, e.g., using Cartopy
                pass

    Back in an interactive IPython session:

    .. code-block:: ipython

        In [1]: ds = pd.DataFrame({{"longitude": np.linspace(0, 10),
           ...:                     "latitude": np.linspace(0, 20)}})
        In [2]: ds.geo.center
        Out[2]: (5.0, 10.0)
        In [3]: ds.geo.plot()  # plots data on a map
    """

    def decorator(accessor):
        if hasattr(cls, name):
            warnings.warn(
                f"registration of accessor {repr(accessor)} under name "
                f"{repr(name)} for type {repr(cls)} is overriding a preexisting "
                "attribute with the same name.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
        setattr(cls, name, CachedAccessor(name, accessor))
        cls._accessors.add(name)
        return accessor

    return decorator


@doc(_register_accessor, klass="DataFrame")
def register_dataframe_accessor(name: str):
    from pandas import DataFrame

    return _register_accessor(name, DataFrame)


@doc(_register_accessor, klass="Series")
def register_series_accessor(name: str):
    from pandas import Series

    return _register_accessor(name, Series)


@doc(_register_accessor, klass="Index")
def register_index_accessor(name: str):
    from pandas import Index

    return _register_accessor(name, Index)
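# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# Registering a Series accessor follows the same pattern as the DataFrame
# example in the docstring above. The "upper" namespace is hypothetical, and
# the demo only runs if called explicitly.
def _demo_register_series_accessor():
    from pandas import Series

    @register_series_accessor("upper")
    class UpperAccessor:
        def __init__(self, pandas_obj) -> None:
            self._obj = pandas_obj

        def transform(self):
            return self._obj.str.upper()

    assert Series(["a", "b"]).upper.transform().tolist() == ["A", "B"]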
llmeval-env/lib/python3.10/site-packages/pandas/core/algorithms.py
ADDED
@@ -0,0 +1,1747 @@
"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import annotations

import decimal
import operator
from textwrap import dedent
from typing import (
    TYPE_CHECKING,
    Literal,
    cast,
)
import warnings

import numpy as np

from pandas._libs import (
    algos,
    hashtable as htable,
    iNaT,
    lib,
)
from pandas._typing import (
    AnyArrayLike,
    ArrayLike,
    AxisInt,
    DtypeObj,
    TakeIndexer,
    npt,
)
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import (
    construct_1d_object_array_from_listlike,
    np_find_common_type,
)
from pandas.core.dtypes.common import (
    ensure_float64,
    ensure_object,
    ensure_platform_int,
    is_array_like,
    is_bool_dtype,
    is_complex_dtype,
    is_dict_like,
    is_extension_array_dtype,
    is_float_dtype,
    is_integer,
    is_integer_dtype,
    is_list_like,
    is_object_dtype,
    is_signed_integer_dtype,
    needs_i8_conversion,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import (
    BaseMaskedDtype,
    CategoricalDtype,
    ExtensionDtype,
    NumpyEADtype,
)
from pandas.core.dtypes.generic import (
    ABCDatetimeArray,
    ABCExtensionArray,
    ABCIndex,
    ABCMultiIndex,
    ABCSeries,
    ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
    isna,
    na_value_for_dtype,
)

from pandas.core.array_algos.take import take_nd
from pandas.core.construction import (
    array as pd_array,
    ensure_wrapped_if_datetimelike,
    extract_array,
)
from pandas.core.indexers import validate_indices

if TYPE_CHECKING:
    from pandas._typing import (
        ListLike,
        NumpySorter,
        NumpyValueArrayLike,
    )

    from pandas import (
        Categorical,
        Index,
        Series,
    )
    from pandas.core.arrays import (
        BaseMaskedArray,
        ExtensionArray,
    )


# --------------- #
# dtype access    #
# --------------- #
def _ensure_data(values: ArrayLike) -> np.ndarray:
    """
    routine to ensure that our data is of the correct
    input dtype for lower-level routines

    This will coerce:
    - ints -> int64
    - uint -> uint64
    - bool -> uint8
    - datetimelike -> i8
    - datetime64tz -> i8 (in local tz)
    - categorical -> codes

    Parameters
    ----------
    values : np.ndarray or ExtensionArray

    Returns
    -------
    np.ndarray
    """

    if not isinstance(values, ABCMultiIndex):
        # extract_array would raise
        values = extract_array(values, extract_numpy=True)

    if is_object_dtype(values.dtype):
        return ensure_object(np.asarray(values))

    elif isinstance(values.dtype, BaseMaskedDtype):
        # i.e. BooleanArray, FloatingArray, IntegerArray
        values = cast("BaseMaskedArray", values)
        if not values._hasna:
            # No pd.NAs -> We can avoid an object-dtype cast (and copy) GH#41816
            # recurse to avoid re-implementing logic for eg bool->uint8
            return _ensure_data(values._data)
        return np.asarray(values)

    elif isinstance(values.dtype, CategoricalDtype):
        # NB: cases that go through here should NOT be using _reconstruct_data
        # on the back-end.
        values = cast("Categorical", values)
        return values.codes

    elif is_bool_dtype(values.dtype):
        if isinstance(values, np.ndarray):
            # i.e. actually dtype == np.dtype("bool")
            return np.asarray(values).view("uint8")
        else:
            # e.g. Sparse[bool, False]  # TODO: no test cases get here
            return np.asarray(values).astype("uint8", copy=False)

    elif is_integer_dtype(values.dtype):
        return np.asarray(values)

    elif is_float_dtype(values.dtype):
        # Note: checking `values.dtype == "float128"` raises on Windows and 32bit
        # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]"
        # has no attribute "itemsize"
        if values.dtype.itemsize in [2, 12, 16]:  # type: ignore[union-attr]
            # we don't (yet) have float128 hashtable support
            return ensure_float64(values)
        return np.asarray(values)

    elif is_complex_dtype(values.dtype):
        return cast(np.ndarray, values)

    # datetimelike
    elif needs_i8_conversion(values.dtype):
        npvalues = values.view("i8")
        npvalues = cast(np.ndarray, npvalues)
        return npvalues

    # we have failed, return object
    values = np.asarray(values, dtype=object)
    return ensure_object(values)

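# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# What the coercions above look like from the outside; plain NumPy inputs
# are enough to see the dtype mapping.
def _demo_ensure_data():
    assert _ensure_data(np.array([True, False])).dtype == np.uint8
    assert _ensure_data(np.array(["2016-01-01"], dtype="M8[ns]")).dtype == np.int64
    assert _ensure_data(np.array([1.0], dtype=np.float16)).dtype == np.float64
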
def _reconstruct_data(
    values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
) -> ArrayLike:
    """
    reverse of _ensure_data

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
    dtype : np.dtype or ExtensionDtype
    original : AnyArrayLike

    Returns
    -------
    ExtensionArray or np.ndarray
    """
    if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
        # Catch DatetimeArray/TimedeltaArray
        return values

    if not isinstance(dtype, np.dtype):
        # i.e. ExtensionDtype; note we have ruled out above the possibility
        # that values.dtype == dtype
        cls = dtype.construct_array_type()

        values = cls._from_sequence(values, dtype=dtype)

    else:
        values = values.astype(dtype, copy=False)

    return values


def _ensure_arraylike(values, func_name: str) -> ArrayLike:
    """
    ensure that we are arraylike if not already
    """
    if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
        # GH#52986
        if func_name != "isin-targets":
            # Make an exception for the comps argument in isin.
            warnings.warn(
                f"{func_name} with argument that is not a Series, Index, "
                "ExtensionArray, or np.ndarray is deprecated and will raise in a "
                "future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        inferred = lib.infer_dtype(values, skipna=False)
        if inferred in ["mixed", "string", "mixed-integer"]:
            # "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160
            if isinstance(values, tuple):
                values = list(values)
            values = construct_1d_object_array_from_listlike(values)
        else:
            values = np.asarray(values)
    return values


_hashtables = {
    "complex128": htable.Complex128HashTable,
    "complex64": htable.Complex64HashTable,
    "float64": htable.Float64HashTable,
    "float32": htable.Float32HashTable,
    "uint64": htable.UInt64HashTable,
    "uint32": htable.UInt32HashTable,
    "uint16": htable.UInt16HashTable,
    "uint8": htable.UInt8HashTable,
    "int64": htable.Int64HashTable,
    "int32": htable.Int32HashTable,
    "int16": htable.Int16HashTable,
    "int8": htable.Int8HashTable,
    "string": htable.StringHashTable,
    "object": htable.PyObjectHashTable,
}


def _get_hashtable_algo(values: np.ndarray):
    """
    Parameters
    ----------
    values : np.ndarray

    Returns
    -------
    htable : HashTable subclass
    values : ndarray
    """
    values = _ensure_data(values)

    ndtype = _check_object_for_strings(values)
    hashtable = _hashtables[ndtype]
    return hashtable, values


def _check_object_for_strings(values: np.ndarray) -> str:
    """
    Check if we can use string hashtable instead of object hashtable.

    Parameters
    ----------
    values : ndarray

    Returns
    -------
    str
    """
    ndtype = values.dtype.name
    if ndtype == "object":
        # it's cheaper to use a String Hash Table than Object; we infer
        # including nulls because that is the only difference between
        # StringHashTable and ObjectHashtable
        if lib.is_string_array(values, skipna=False):
            ndtype = "string"
    return ndtype


# --------------- #
# top-level algos #
# --------------- #

def unique(values):
    """
    Return unique values based on a hash table.

    Uniques are returned in order of appearance. This does NOT sort.

    Significantly faster than numpy.unique for long enough sequences.
    Includes NA values.

    Parameters
    ----------
    values : 1d array-like

    Returns
    -------
    numpy.ndarray or ExtensionArray

        The return can be:

        * Index : when the input is an Index
        * Categorical : when the input is a Categorical dtype
        * ndarray : when the input is a Series/ndarray

        Return numpy.ndarray or ExtensionArray.

    See Also
    --------
    Index.unique : Return unique values from an Index.
    Series.unique : Return unique values of Series object.

    Examples
    --------
    >>> pd.unique(pd.Series([2, 1, 3, 3]))
    array([2, 1, 3])

    >>> pd.unique(pd.Series([2] + [1] * 5))
    array([2, 1])

    >>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
    array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')

    >>> pd.unique(
    ...     pd.Series(
    ...         [
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...         ]
    ...     )
    ... )
    <DatetimeArray>
    ['2016-01-01 00:00:00-05:00']
    Length: 1, dtype: datetime64[ns, US/Eastern]

    >>> pd.unique(
    ...     pd.Index(
    ...         [
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...         ]
    ...     )
    ... )
    DatetimeIndex(['2016-01-01 00:00:00-05:00'],
                  dtype='datetime64[ns, US/Eastern]',
                  freq=None)

    >>> pd.unique(np.array(list("baabc"), dtype="O"))
    array(['b', 'a', 'c'], dtype=object)

    An unordered Categorical will return categories in the
    order of appearance.

    >>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
    ['b', 'a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    >>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
    ['b', 'a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    An ordered Categorical preserves the category ordering.

    >>> pd.unique(
    ...     pd.Series(
    ...         pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
    ...     )
    ... )
    ['b', 'a', 'c']
    Categories (3, object): ['a' < 'b' < 'c']

    An array of tuples

    >>> pd.unique(pd.Series([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]).values)
    array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
    """
    return unique_with_mask(values)

def nunique_ints(values: ArrayLike) -> int:
    """
    Return the number of unique values for integer array-likes.

    Significantly faster than pandas.unique for long enough sequences.
    No checks are done to ensure input is integral.

    Parameters
    ----------
    values : 1d array-like

    Returns
    -------
    int : The number of unique values in ``values``
    """
    if len(values) == 0:
        return 0
    values = _ensure_data(values)
    # bincount requires intp
    result = (np.bincount(values.ravel().astype("intp")) != 0).sum()
    return result

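# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# nunique_ints counts distinct values via np.bincount instead of a hashtable;
# bincount assumes non-negative integers, which is why callers restrict this
# to code-like integer data.
def _demo_nunique_ints():
    codes = np.array([0, 1, 1, 3, 3, 3])
    assert nunique_ints(codes) == 3  # {0, 1, 3}
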
def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None = None):
    """See algorithms.unique for docs. Takes a mask for masked arrays."""
    values = _ensure_arraylike(values, func_name="unique")

    if isinstance(values.dtype, ExtensionDtype):
        # Dispatch to extension dtype's unique.
        return values.unique()

    original = values
    hashtable, values = _get_hashtable_algo(values)

    table = hashtable(len(values))
    if mask is None:
        uniques = table.unique(values)
        uniques = _reconstruct_data(uniques, original.dtype, original)
        return uniques

    else:
        uniques, mask = table.unique(values, mask=mask)
        uniques = _reconstruct_data(uniques, original.dtype, original)
        assert mask is not None  # for mypy
        return uniques, mask.astype("bool")


unique1d = unique


_MINIMUM_COMP_ARR_LEN = 1_000_000

def isin(comps: ListLike, values: ListLike) -> npt.NDArray[np.bool_]:
    """
    Compute the isin boolean array.

    Parameters
    ----------
    comps : list-like
    values : list-like

    Returns
    -------
    ndarray[bool]
        Same length as `comps`.
    """
    if not is_list_like(comps):
        raise TypeError(
            "only list-like objects are allowed to be passed "
            f"to isin(), you passed a `{type(comps).__name__}`"
        )
    if not is_list_like(values):
        raise TypeError(
            "only list-like objects are allowed to be passed "
            f"to isin(), you passed a `{type(values).__name__}`"
        )

    if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
        orig_values = list(values)
        values = _ensure_arraylike(orig_values, func_name="isin-targets")

        if (
            len(values) > 0
            and values.dtype.kind in "iufcb"
            and not is_signed_integer_dtype(comps)
        ):
            # GH#46485 Use object to avoid upcast to float64 later
            # TODO: Share with _find_common_type_compat
            values = construct_1d_object_array_from_listlike(orig_values)

    elif isinstance(values, ABCMultiIndex):
        # Avoid raising in extract_array
        values = np.array(values)
    else:
        values = extract_array(values, extract_numpy=True, extract_range=True)

    comps_array = _ensure_arraylike(comps, func_name="isin")
    comps_array = extract_array(comps_array, extract_numpy=True)
    if not isinstance(comps_array, np.ndarray):
        # i.e. Extension Array
        return comps_array.isin(values)

    elif needs_i8_conversion(comps_array.dtype):
        # Dispatch to DatetimeLikeArrayMixin.isin
        return pd_array(comps_array).isin(values)
    elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps_array.dtype):
        # e.g. comps_array are integers and values are datetime64s
        return np.zeros(comps_array.shape, dtype=bool)
        # TODO: not quite right ... Sparse/Categorical
    elif needs_i8_conversion(values.dtype):
        return isin(comps_array, values.astype(object))

    elif isinstance(values.dtype, ExtensionDtype):
        return isin(np.asarray(comps_array), np.asarray(values))

    # GH16012
    # Ensure np.isin doesn't get object types or it *may* throw an exception
    # Although the hashmap has O(1) look-up (vs. O(log n) in a sorted array),
    # np.isin is faster for small sizes
    if (
        len(comps_array) > _MINIMUM_COMP_ARR_LEN
        and len(values) <= 26
        and comps_array.dtype != object
    ):
        # If the values include nan we need to check for nan explicitly
        # since np.nan is not equal to np.nan
        if isna(values).any():

            def f(c, v):
                return np.logical_or(np.isin(c, v).ravel(), np.isnan(c))

        else:
            f = lambda a, b: np.isin(a, b).ravel()

    else:
        common = np_find_common_type(values.dtype, comps_array.dtype)
        values = values.astype(common, copy=False)
        comps_array = comps_array.astype(common, copy=False)
        f = htable.ismember

    return f(comps_array, values)

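# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# The tail of isin() picks between np.isin (sort-based) and htable.ismember:
# np.isin wins only when comps is very large and values is tiny. The sizes
# below are chosen just to cross the _MINIMUM_COMP_ARR_LEN threshold.
def _demo_isin_dispatch():
    comps = np.arange(2_000_000)  # > _MINIMUM_COMP_ARR_LEN -> np.isin path
    small = np.array([3, 5])  # len <= 26
    big = np.arange(100)  # len > 26 -> hashtable path
    assert isin(comps, small).sum() == 2
    assert isin(comps, big).sum() == 100
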
def factorize_array(
    values: np.ndarray,
    use_na_sentinel: bool = True,
    size_hint: int | None = None,
    na_value: object = None,
    mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[npt.NDArray[np.intp], np.ndarray]:
    """
    Factorize a numpy array to codes and uniques.

    This doesn't do any coercion of types or unboxing before factorization.

    Parameters
    ----------
    values : ndarray
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.
    size_hint : int, optional
        Passed through to the hashtable's 'get_labels' method
    na_value : object, optional
        A value in `values` to consider missing. Note: only use this
        parameter when you know that you don't have any values pandas would
        consider missing in the array (NaN for float data, iNaT for
        datetimes, etc.).
    mask : ndarray[bool], optional
        If not None, the mask is used as indicator for missing values
        (True = missing, False = valid) instead of `na_value` or
        condition "val != val".

    Returns
    -------
    codes : ndarray[np.intp]
    uniques : ndarray
    """
    original = values
    if values.dtype.kind in "mM":
        # _get_hashtable_algo will cast dt64/td64 to i8 via _ensure_data, so we
        # need to do the same to na_value. We are assuming here that the passed
        # na_value is an appropriately-typed NaT.
        # e.g. test_where_datetimelike_categorical
        na_value = iNaT

    hash_klass, values = _get_hashtable_algo(values)

    table = hash_klass(size_hint or len(values))
    uniques, codes = table.factorize(
        values,
        na_sentinel=-1,
        na_value=na_value,
        mask=mask,
        ignore_na=use_na_sentinel,
    )

    # re-cast e.g. i8->dt64/td64, uint8->bool
    uniques = _reconstruct_data(uniques, original.dtype, original)

    codes = ensure_platform_int(codes)
    return codes, uniques

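# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# factorize_array is the low-level, no-coercion core: codes index into
# uniques, with -1 marking missing values under the default sentinel.
def _demo_factorize_array():
    codes, uniques = factorize_array(np.array(["b", "a", "b", None], dtype=object))
    assert codes.tolist() == [0, 1, 0, -1]
    assert uniques.tolist() == ["b", "a"]
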
@doc(
    values=dedent(
        """\
    values : sequence
        A 1-D sequence. Sequences that aren't pandas objects are
        coerced to ndarrays before factorization.
    """
    ),
    sort=dedent(
        """\
    sort : bool, default False
        Sort `uniques` and shuffle `codes` to maintain the
        relationship.
    """
    ),
    size_hint=dedent(
        """\
    size_hint : int, optional
        Hint to the hashtable sizer.
    """
    ),
)
def factorize(
    values,
    sort: bool = False,
    use_na_sentinel: bool = True,
    size_hint: int | None = None,
) -> tuple[np.ndarray, np.ndarray | Index]:
    """
    Encode the object as an enumerated type or categorical variable.

    This method is useful for obtaining a numeric representation of an
    array when all that matters is identifying distinct values. `factorize`
    is available as both a top-level function :func:`pandas.factorize`,
    and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.

    Parameters
    ----------
    {values}{sort}
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.

        .. versionadded:: 1.5.0
    {size_hint}\

    Returns
    -------
    codes : ndarray
        An integer ndarray that's an indexer into `uniques`.
        ``uniques.take(codes)`` will have the same values as `values`.
    uniques : ndarray, Index, or Categorical
        The unique valid values. When `values` is Categorical, `uniques`
        is a Categorical. When `values` is some other pandas object, an
        `Index` is returned. Otherwise, a 1-D ndarray is returned.

        .. note::

           Even if there's a missing value in `values`, `uniques` will
           *not* contain an entry for it.

    See Also
    --------
    cut : Discretize continuous-valued array.
    unique : Find the unique values in an array.

    Notes
    -----
    Reference :ref:`the user guide <reshaping.factorize>` for more examples.

    Examples
    --------
    These examples all show factorize as a top-level method like
    ``pd.factorize(values)``. The results are identical for methods like
    :meth:`Series.factorize`.

    >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"))
    >>> codes
    array([0, 0, 1, 2, 0])
    >>> uniques
    array(['b', 'a', 'c'], dtype=object)

    With ``sort=True``, the `uniques` will be sorted, and `codes` will be
    shuffled so that the relationship is maintained.

    >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"),
    ...                               sort=True)
    >>> codes
    array([1, 1, 0, 2, 1])
    >>> uniques
    array(['a', 'b', 'c'], dtype=object)

    When ``use_na_sentinel=True`` (the default), missing values are indicated in
    the `codes` with the sentinel value ``-1`` and missing values are not
    included in `uniques`.

    >>> codes, uniques = pd.factorize(np.array(['b', None, 'a', 'c', 'b'], dtype="O"))
    >>> codes
    array([ 0, -1,  1,  2,  0])
    >>> uniques
    array(['b', 'a', 'c'], dtype=object)

    Thus far, we've only factorized lists (which are internally coerced to
    NumPy arrays). When factorizing pandas objects, the type of `uniques`
    will differ. For Categoricals, a `Categorical` is returned.

    >>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
    >>> codes, uniques = pd.factorize(cat)
    >>> codes
    array([0, 0, 1])
    >>> uniques
    ['a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    Notice that ``'b'`` is in ``uniques.categories``, despite not being
    present in ``cat.values``.

    For all other pandas objects, an Index of the appropriate type is
    returned.

    >>> cat = pd.Series(['a', 'a', 'c'])
    >>> codes, uniques = pd.factorize(cat)
    >>> codes
    array([0, 0, 1])
    >>> uniques
    Index(['a', 'c'], dtype='object')

    If NaN is in the values, and we want to include NaN in the uniques of the
    values, it can be achieved by setting ``use_na_sentinel=False``.

    >>> values = np.array([1, 2, 1, np.nan])
    >>> codes, uniques = pd.factorize(values)  # default: use_na_sentinel=True
    >>> codes
    array([ 0,  1,  0, -1])
    >>> uniques
    array([1., 2.])

    >>> codes, uniques = pd.factorize(values, use_na_sentinel=False)
    >>> codes
    array([0, 1, 0, 2])
    >>> uniques
    array([ 1.,  2., nan])
    """
    # Implementation notes: This method is responsible for 3 things
    # 1.) coercing data to array-like (ndarray, Index, extension array)
    # 2.) factorizing codes and uniques
    # 3.) Maybe boxing the uniques in an Index
    #
    # Step 2 is dispatched to extension types (like Categorical). They are
    # responsible only for factorization. All data coercion, sorting and boxing
    # should happen here.
    if isinstance(values, (ABCIndex, ABCSeries)):
        return values.factorize(sort=sort, use_na_sentinel=use_na_sentinel)

    values = _ensure_arraylike(values, func_name="factorize")
    original = values

    if (
        isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
        and values.freq is not None
    ):
        # The presence of 'freq' means we can fast-path sorting and know there
        # aren't NAs
        codes, uniques = values.factorize(sort=sort)
        return codes, uniques

    elif not isinstance(values, np.ndarray):
        # i.e. ExtensionArray
        codes, uniques = values.factorize(use_na_sentinel=use_na_sentinel)

    else:
        values = np.asarray(values)  # convert DTA/TDA/MultiIndex

        if not use_na_sentinel and values.dtype == object:
            # factorize can now handle differentiating various types of null values.
            # These can only occur when the array has object dtype.
            # However, for backwards compatibility we only use the null for the
            # provided dtype. This may be revisited in the future, see GH#48476.
            null_mask = isna(values)
            if null_mask.any():
                na_value = na_value_for_dtype(values.dtype, compat=False)
                # Don't modify (potentially user-provided) array
                values = np.where(null_mask, na_value, values)

        codes, uniques = factorize_array(
            values,
            use_na_sentinel=use_na_sentinel,
            size_hint=size_hint,
        )

    if sort and len(uniques) > 0:
        uniques, codes = safe_sort(
            uniques,
            codes,
            use_na_sentinel=use_na_sentinel,
            assume_unique=True,
            verify=False,
        )

    uniques = _reconstruct_data(uniques, original.dtype, original)

    return codes, uniques

def value_counts(
    values,
    sort: bool = True,
    ascending: bool = False,
    normalize: bool = False,
    bins=None,
    dropna: bool = True,
) -> Series:
    """
    Compute a histogram of the counts of non-null values.

    Parameters
    ----------
    values : ndarray (1-d)
    sort : bool, default True
        Sort by values
    ascending : bool, default False
        Sort in ascending order
    normalize : bool, default False
        If True then compute a relative histogram
    bins : integer, optional
        Rather than count values, group them into half-open bins,
        convenience for pd.cut, only works with numeric data
    dropna : bool, default True
        Don't include counts of NaN

    Returns
    -------
    Series
    """
    warnings.warn(
        # GH#53493
        "pandas.value_counts is deprecated and will be removed in a "
        "future version. Use pd.Series(obj).value_counts() instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    return value_counts_internal(
        values,
        sort=sort,
        ascending=ascending,
        normalize=normalize,
        bins=bins,
        dropna=dropna,
    )


def value_counts_internal(
    values,
    sort: bool = True,
    ascending: bool = False,
    normalize: bool = False,
    bins=None,
    dropna: bool = True,
) -> Series:
    from pandas import (
        Index,
        Series,
    )

    index_name = getattr(values, "name", None)
    name = "proportion" if normalize else "count"

    if bins is not None:
        from pandas.core.reshape.tile import cut

        if isinstance(values, Series):
            values = values._values

        try:
            ii = cut(values, bins, include_lowest=True)
        except TypeError as err:
            raise TypeError("bins argument only works with numeric data.") from err

        # count, remove nulls (from the index), and use the bins
        result = ii.value_counts(dropna=dropna)
        result.name = name
        result = result[result.index.notna()]
        result.index = result.index.astype("interval")
        result = result.sort_index()

        # if we are dropna and we have NO values
        if dropna and (result._values == 0).all():
            result = result.iloc[0:0]

        # normalizing is by len of all (regardless of dropna)
        counts = np.array([len(ii)])

    else:
        if is_extension_array_dtype(values):
            # handle Categorical and sparse,
            result = Series(values, copy=False)._values.value_counts(dropna=dropna)
            result.name = name
            result.index.name = index_name
            counts = result._values
            if not isinstance(counts, np.ndarray):
                # e.g. ArrowExtensionArray
                counts = np.asarray(counts)

        elif isinstance(values, ABCMultiIndex):
            # GH49558
            levels = list(range(values.nlevels))
            result = (
                Series(index=values, name=name)
                .groupby(level=levels, dropna=dropna)
                .size()
            )
            result.index.names = values.names
            counts = result._values

        else:
            values = _ensure_arraylike(values, func_name="value_counts")
            keys, counts, _ = value_counts_arraylike(values, dropna)
            if keys.dtype == np.float16:
                keys = keys.astype(np.float32)

            # For backwards compatibility, we let Index do its normal type
            # inference, _except_ if it infers from object to bool.
            idx = Index(keys)
            if idx.dtype == bool and keys.dtype == object:
                idx = idx.astype(object)
            elif (
                idx.dtype != keys.dtype  # noqa: PLR1714  # pylint: disable=R1714
                and idx.dtype != "string[pyarrow_numpy]"
            ):
                warnings.warn(
                    # GH#56161
                    "The behavior of value_counts with object-dtype is deprecated. "
                    "In a future version, this will *not* perform dtype inference "
                    "on the resulting index. To retain the old behavior, use "
                    "`result.index = result.index.infer_objects()`",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            idx.name = index_name

            result = Series(counts, index=idx, name=name, copy=False)

    if sort:
        result = result.sort_values(ascending=ascending)

    if normalize:
        result = result / counts.sum()

    return result


# Called once from SparseArray, otherwise could be private
def value_counts_arraylike(
    values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
) -> tuple[ArrayLike, npt.NDArray[np.int64], int]:
    """
    Parameters
    ----------
    values : np.ndarray
    dropna : bool
    mask : np.ndarray[bool] or None, default None

    Returns
    -------
    uniques : np.ndarray
    counts : np.ndarray[np.int64]
    """
    original = values
    values = _ensure_data(values)

    keys, counts, na_counter = htable.value_count(values, dropna, mask=mask)

    if needs_i8_conversion(original.dtype):
        # datetime, timedelta, or period

        if dropna:
            mask = keys != iNaT
            keys, counts = keys[mask], counts[mask]

    res_keys = _reconstruct_data(keys, original.dtype, original)
    return res_keys, counts, na_counter

def duplicated(
    values: ArrayLike,
    keep: Literal["first", "last", False] = "first",
    mask: npt.NDArray[np.bool_] | None = None,
) -> npt.NDArray[np.bool_]:
    """
    Return boolean ndarray denoting duplicate values.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        Array over which to check for duplicate values.
    keep : {'first', 'last', False}, default 'first'
        - ``first`` : Mark duplicates as ``True`` except for the first
          occurrence.
        - ``last`` : Mark duplicates as ``True`` except for the last
          occurrence.
        - False : Mark all duplicates as ``True``.
    mask : ndarray[bool], optional
        array indicating which elements to exclude from checking

    Returns
    -------
    duplicated : ndarray[bool]
    """
    values = _ensure_data(values)
    return htable.duplicated(values, keep=keep, mask=mask)

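# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# keep controls which occurrence is *not* flagged as a duplicate.
def _demo_duplicated():
    vals = np.array([1, 2, 1, 1], dtype=np.int64)
    assert duplicated(vals, keep="first").tolist() == [False, False, True, True]
    assert duplicated(vals, keep="last").tolist() == [True, False, True, False]
    assert duplicated(vals, keep=False).tolist() == [True, False, True, True]
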
def mode(
    values: ArrayLike, dropna: bool = True, mask: npt.NDArray[np.bool_] | None = None
) -> ArrayLike:
    """
    Returns the mode(s) of an array.

    Parameters
    ----------
    values : array-like
        Array over which to check for duplicate values.
    dropna : bool, default True
        Don't consider counts of NaN/NaT.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    values = _ensure_arraylike(values, func_name="mode")
    original = values

    if needs_i8_conversion(values.dtype):
        # Got here with ndarray; dispatch to DatetimeArray/TimedeltaArray.
        values = ensure_wrapped_if_datetimelike(values)
        values = cast("ExtensionArray", values)
        return values._mode(dropna=dropna)

    values = _ensure_data(values)

    npresult, res_mask = htable.mode(values, dropna=dropna, mask=mask)
    if res_mask is not None:
        return npresult, res_mask  # type: ignore[return-value]

    try:
        npresult = np.sort(npresult)
    except TypeError as err:
        warnings.warn(
            f"Unable to sort modes: {err}",
            stacklevel=find_stack_level(),
        )

    result = _reconstruct_data(npresult, original.dtype, original)
    return result

def rank(
    values: ArrayLike,
    axis: AxisInt = 0,
    method: str = "average",
    na_option: str = "keep",
    ascending: bool = True,
    pct: bool = False,
) -> npt.NDArray[np.float64]:
    """
    Rank the values along a given axis.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        Array whose values will be ranked. The number of dimensions in this
        array must not exceed 2.
    axis : int, default 0
        Axis over which to perform rankings.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        The method by which tiebreaks are broken during the ranking.
    na_option : {'keep', 'top'}, default 'keep'
        The method by which NaNs are placed in the ranking.
        - ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
          are ranked at the top
    ascending : bool, default True
        Whether or not the elements should be ranked in ascending order.
    pct : bool, default False
        Whether or not to display the returned rankings in integer form
        (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
    """
    is_datetimelike = needs_i8_conversion(values.dtype)
    values = _ensure_data(values)

    if values.ndim == 1:
        ranks = algos.rank_1d(
            values,
            is_datetimelike=is_datetimelike,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    elif values.ndim == 2:
        ranks = algos.rank_2d(
            values,
            axis=axis,
            is_datetimelike=is_datetimelike,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    else:
        raise TypeError("Arrays with ndim > 2 are not supported.")

    return ranks

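# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# A minimal 1-D ranking with the default 'average' tie method: tied values
# share the mean of the ranks they occupy.
def _demo_rank():
    vals = np.array([30.0, 10.0, 20.0, 20.0])
    assert rank(vals).tolist() == [4.0, 1.0, 2.5, 2.5]
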
# ---- #
# take #
# ---- #

def take(
    arr,
    indices: TakeIndexer,
    axis: AxisInt = 0,
    allow_fill: bool = False,
    fill_value=None,
):
    """
    Take elements from an array.

    Parameters
    ----------
    arr : array-like or scalar value
        Non array-likes (sequences/scalars without a dtype) are coerced
        to an ndarray.

        .. deprecated:: 2.1.0
            Passing an argument other than a numpy.ndarray, ExtensionArray,
            Index, or Series is deprecated.

    indices : sequence of int or one-dimensional np.ndarray of int
        Indices to be taken.
    axis : int, default 0
        The axis over which to select values.
    allow_fill : bool, default False
        How to handle negative values in `indices`.

        * False: negative values in `indices` indicate positional indices
          from the right (the default). This is similar to :func:`numpy.take`.

        * True: negative values in `indices` indicate
          missing values. These values are set to `fill_value`. Any other
          negative values raise a ``ValueError``.

    fill_value : any, optional
        Fill value to use for NA-indices when `allow_fill` is True.
        This may be ``None``, in which case the default NA value for
        the type (``self.dtype.na_value``) is used.

        For multi-dimensional `arr`, each *element* is filled with
        `fill_value`.

    Returns
    -------
    ndarray or ExtensionArray
        Same type as the input.

    Raises
    ------
    IndexError
        When `indices` is out of bounds for the array.
    ValueError
        When the indexer contains negative values other than ``-1``
        and `allow_fill` is True.

    Notes
    -----
    When `allow_fill` is False, `indices` may be whatever dimensionality
    is accepted by NumPy for `arr`.

    When `allow_fill` is True, `indices` should be 1-D.

    See Also
    --------
    numpy.take : Take elements from an array along an axis.

    Examples
    --------
    >>> import pandas as pd

    With the default ``allow_fill=False``, negative numbers indicate
    positional indices from the right.

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1])
    array([10, 10, 30])

    Setting ``allow_fill=True`` will place `fill_value` in those positions.

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
    array([10., 10., nan])

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
    ...                        fill_value=-10)
    array([ 10,  10, -10])
    """
    if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)):
        # GH#52981
        warnings.warn(
            "pd.api.extensions.take accepting non-standard inputs is deprecated "
            "and will raise in a future version. Pass either a numpy.ndarray, "
            "ExtensionArray, Index, or Series instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    if not is_array_like(arr):
        arr = np.asarray(arr)

    indices = ensure_platform_int(indices)

    if allow_fill:
        # Pandas style, -1 means NA
        validate_indices(indices, arr.shape[axis])
        result = take_nd(
            arr, indices, axis=axis, allow_fill=True, fill_value=fill_value
        )
    else:
        # NumPy style
        result = arr.take(indices, axis=axis)
    return result

# ------------ #
# searchsorted #
# ------------ #


def searchsorted(
    arr: ArrayLike,
    value: NumpyValueArrayLike | ExtensionArray,
    side: Literal["left", "right"] = "left",
    sorter: NumpySorter | None = None,
) -> npt.NDArray[np.intp] | np.intp:
    """
    Find indices where elements should be inserted to maintain order.

    Find the indices into a sorted array `arr` such that, if the
    corresponding elements in `value` were inserted before the indices,
    the order of `arr` would be preserved.

    Assuming that `arr` is sorted:

    ====== ================================
    `side` returned index `i` satisfies
    ====== ================================
    left   ``arr[i-1] < value <= arr[i]``
    right  ``arr[i-1] <= value < arr[i]``
    ====== ================================

    Parameters
    ----------
    arr : np.ndarray, ExtensionArray, Series
        Input array. If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        that sort it.
    value : array-like or scalar
        Values to insert into `arr`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index. If there is no suitable
        index, return either 0 or N (where N is the length of `arr`).
    sorter : 1-D array-like, optional
        Optional array of integer indices that sort `arr` into ascending
        order. They are typically the result of argsort.

    Returns
    -------
    array of ints or int
        If value is array-like, array of insertion points.
        If value is scalar, a single integer.

    See Also
    --------
    numpy.searchsorted : Similar method from NumPy.
    """
    if sorter is not None:
        sorter = ensure_platform_int(sorter)

    if (
        isinstance(arr, np.ndarray)
        and arr.dtype.kind in "iu"
        and (is_integer(value) or is_integer_dtype(value))
    ):
        # if `arr` and `value` have different dtypes, `arr` would be
        # recast by numpy, causing a slow search.
        # Before searching below, we therefore try to give `value` the
        # same dtype as `arr`, while guarding against integer overflows.
        iinfo = np.iinfo(arr.dtype.type)
        value_arr = np.array([value]) if is_integer(value) else np.array(value)
        if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():
            # value within bounds, so no overflow, so can convert value dtype
            # to dtype of arr
            dtype = arr.dtype
        else:
            dtype = value_arr.dtype

        if is_integer(value):
            # We know that value is int
            value = cast(int, dtype.type(value))
        else:
            value = pd_array(cast(ArrayLike, value), dtype=dtype)
    else:
        # E.g. if `arr` is an array with dtype='datetime64[ns]'
        # and `value` is a pd.Timestamp, we may need to convert value
        arr = ensure_wrapped_if_datetimelike(arr)

    # Argument 1 to "searchsorted" of "ndarray" has incompatible type
    # "Union[NumpyValueArrayLike, ExtensionArray]"; expected "NumpyValueArrayLike"
    return arr.searchsorted(value, side=side, sorter=sorter)  # type: ignore[arg-type]

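# A minimal usage sketch (not part of the pandas source), assuming a sorted
# integer ndarray; behavior matches numpy.searchsorted, with the dtype guard
# above avoiding a slow recast of `arr`:
#
#     >>> import numpy as np
#     >>> from pandas.core.algorithms import searchsorted
#     >>> searchsorted(np.array([1, 2, 3, 5]), 4)
#     3
#     >>> searchsorted(np.array([1, 2, 3, 5]), [0, 6])
#     array([0, 4])
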
# ---- #
# diff #
# ---- #

_diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}


def diff(arr, n: int, axis: AxisInt = 0):
    """
    Compute the difference over `n` periods, analogous to ``s - s.shift(n)``.

    Parameters
    ----------
    arr : ndarray or ExtensionArray
    n : int
        number of periods
    axis : {0, 1}
        axis to shift on

    Returns
    -------
    shifted
    """

    n = int(n)
    na = np.nan
    dtype = arr.dtype

    is_bool = is_bool_dtype(dtype)
    if is_bool:
        op = operator.xor
    else:
        op = operator.sub

    if isinstance(dtype, NumpyEADtype):
        # NumpyExtensionArray cannot necessarily hold shifted versions of itself.
        arr = arr.to_numpy()
        dtype = arr.dtype

    if not isinstance(arr, np.ndarray):
        # i.e. ExtensionArray
        if hasattr(arr, f"__{op.__name__}__"):
            if axis != 0:
                raise ValueError(f"cannot diff {type(arr).__name__} on axis={axis}")
            return op(arr, arr.shift(n))
        else:
            raise TypeError(
                f"{type(arr).__name__} has no 'diff' method. "
                "Convert to a suitable dtype prior to calling 'diff'."
            )

    is_timedelta = False
    if arr.dtype.kind in "mM":
        dtype = np.int64
        arr = arr.view("i8")
        na = iNaT
        is_timedelta = True

    elif is_bool:
        # We have to cast in order to be able to hold np.nan
        dtype = np.object_

    elif dtype.kind in "iu":
        # We have to cast in order to be able to hold np.nan

        # int8, int16 are incompatible with float64,
        # see https://github.com/cython/cython/issues/2646
        if arr.dtype.name in ["int8", "int16"]:
            dtype = np.float32
        else:
            dtype = np.float64

    orig_ndim = arr.ndim
    if orig_ndim == 1:
        # reshape so we can always use algos.diff_2d
        arr = arr.reshape(-1, 1)
        # TODO: require axis == 0

    dtype = np.dtype(dtype)
    out_arr = np.empty(arr.shape, dtype=dtype)

    na_indexer = [slice(None)] * 2
    na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
    out_arr[tuple(na_indexer)] = na

    if arr.dtype.name in _diff_special:
        # TODO: can diff_2d dtype specialization troubles be fixed by defining
        # out_arr inside diff_2d?
        algos.diff_2d(arr, out_arr, n, axis, datetimelike=is_timedelta)
    else:
        # To keep mypy happy, _res_indexer is a list while res_indexer is
        # a tuple, ditto for lag_indexer.
        _res_indexer = [slice(None)] * 2
        _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
        res_indexer = tuple(_res_indexer)

        _lag_indexer = [slice(None)] * 2
        _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
        lag_indexer = tuple(_lag_indexer)

        out_arr[res_indexer] = op(arr[res_indexer], arr[lag_indexer])

    if is_timedelta:
        out_arr = out_arr.view("timedelta64[ns]")

    if orig_ndim == 1:
        out_arr = out_arr[:, 0]
    return out_arr

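# A minimal usage sketch (not part of the pandas source): integer input is
# upcast so the leading period can hold NaN:
#
#     >>> import numpy as np
#     >>> from pandas.core.algorithms import diff
#     >>> diff(np.array([1, 3, 6]), 1)
#     array([nan,  2.,  3.])
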
# --------------------------------------------------------------------
# Helper functions


# Note: safe_sort is in algorithms.py instead of sorting.py because it is
# low-dependency, is used in this module, and uses private methods from
# this module.
def safe_sort(
    values: Index | ArrayLike,
    codes: npt.NDArray[np.intp] | None = None,
    use_na_sentinel: bool = True,
    assume_unique: bool = False,
    verify: bool = True,
) -> AnyArrayLike | tuple[AnyArrayLike, np.ndarray]:
    """
    Sort ``values`` and reorder corresponding ``codes``.

    ``values`` should be unique if ``codes`` is not None.
    Safe for use with mixed types (int, str), orders ints before strs.

    Parameters
    ----------
    values : list-like
        Sequence; must be unique if ``codes`` is not None.
    codes : np.ndarray[intp] or None, default None
        Indices to ``values``. All out of bound indices are treated as
        "not found" and will be masked with ``-1``.
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.
    assume_unique : bool, default False
        When True, ``values`` are assumed to be unique, which can speed up
        the calculation. Ignored when ``codes`` is None.
    verify : bool, default True
        Check if codes are out of bound for the values and put out of bound
        codes equal to ``-1``. If ``verify=False``, it is assumed there
        are no out of bound codes. Ignored when ``codes`` is None.

    Returns
    -------
    ordered : AnyArrayLike
        Sorted ``values``
    new_codes : ndarray
        Reordered ``codes``; returned when ``codes`` is not None.

    Raises
    ------
    TypeError
        * If ``values`` is not list-like or if ``codes`` is neither None
          nor list-like
        * If ``values`` cannot be sorted
    ValueError
        * If ``codes`` is not None and ``values`` contain duplicates.
    """
    if not isinstance(values, (np.ndarray, ABCExtensionArray, ABCIndex)):
        raise TypeError(
            "Only np.ndarray, ExtensionArray, and Index objects are allowed to "
            "be passed to safe_sort as values"
        )

    sorter = None
    ordered: AnyArrayLike

    if (
        not isinstance(values.dtype, ExtensionDtype)
        and lib.infer_dtype(values, skipna=False) == "mixed-integer"
    ):
        ordered = _sort_mixed(values)
    else:
        try:
            sorter = values.argsort()
            ordered = values.take(sorter)
        except (TypeError, decimal.InvalidOperation):
            # Previous sorters failed or were not applicable, try `_sort_mixed`
            # which would work, but which fails for special case of 1d arrays
            # with tuples.
            if values.size and isinstance(values[0], tuple):
                # error: Argument 1 to "_sort_tuples" has incompatible type
                # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected
                # "ndarray[Any, Any]"
                ordered = _sort_tuples(values)  # type: ignore[arg-type]
            else:
                ordered = _sort_mixed(values)

    # codes:

    if codes is None:
        return ordered

    if not is_list_like(codes):
        raise TypeError(
            "Only list-like objects or None are allowed to "
            "be passed to safe_sort as codes"
        )
    codes = ensure_platform_int(np.asarray(codes))

    if not assume_unique and not len(unique(values)) == len(values):
        raise ValueError("values should be unique if codes is not None")

    if sorter is None:
        # mixed types
        # error: Argument 1 to "_get_hashtable_algo" has incompatible type
        # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected
        # "ndarray[Any, Any]"
        hash_klass, values = _get_hashtable_algo(values)  # type: ignore[arg-type]
        t = hash_klass(len(values))
        t.map_locations(values)
        sorter = ensure_platform_int(t.lookup(ordered))

    if use_na_sentinel:
        # take_nd is faster, but only works for na_sentinels of -1
        order2 = sorter.argsort()
        if verify:
            mask = (codes < -len(values)) | (codes >= len(values))
            codes[mask] = 0
        else:
            mask = None
        new_codes = take_nd(order2, codes, fill_value=-1)
    else:
        reverse_indexer = np.empty(len(sorter), dtype=int)
        reverse_indexer.put(sorter, np.arange(len(sorter)))
        # Out of bound indices will be masked with `-1` next, so we
        # may deal with them here without performance loss using `mode='wrap'`
        new_codes = reverse_indexer.take(codes, mode="wrap")

        if use_na_sentinel:
            mask = codes == -1
            if verify:
                mask = mask | (codes < -len(values)) | (codes >= len(values))

    if use_na_sentinel and mask is not None:
        np.putmask(new_codes, mask, -1)

    return ordered, ensure_platform_int(new_codes)


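# A minimal usage sketch (not part of the pandas source): codes are remapped
# so they keep pointing at the same values after sorting:
#
#     >>> import numpy as np
#     >>> from pandas.core.algorithms import safe_sort
#     >>> safe_sort(np.array([2, 0, 1]), codes=np.array([1, 2, 0]))
#     (array([0, 1, 2]), array([0, 1, 2]))
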
def _sort_mixed(values) -> AnyArrayLike:
    """order ints before strings before nulls in 1d arrays"""
    str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
    null_pos = np.array([isna(x) for x in values], dtype=bool)
    num_pos = ~str_pos & ~null_pos
    str_argsort = np.argsort(values[str_pos])
    num_argsort = np.argsort(values[num_pos])
    # convert boolean arrays to positional indices, then order by underlying values
    str_locs = str_pos.nonzero()[0].take(str_argsort)
    num_locs = num_pos.nonzero()[0].take(num_argsort)
    null_locs = null_pos.nonzero()[0]
    locs = np.concatenate([num_locs, str_locs, null_locs])
    return values.take(locs)


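# A minimal usage sketch (not part of the pandas source): numbers sort before
# strings, and nulls go last:
#
#     >>> import numpy as np
#     >>> from pandas.core.algorithms import _sort_mixed
#     >>> _sort_mixed(np.array([3, "b", 1, np.nan, "a"], dtype=object))
#     array([1, 3, 'a', 'b', nan], dtype=object)
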
def _sort_tuples(values: np.ndarray) -> np.ndarray:
    """
    Convert array of tuples (1d) to array of arrays (2d).
    We need to keep the columns separately as they contain different types and
    nans (can't use `np.sort` as it may fail when str and nan are mixed in a
    column as types cannot be compared).
    """
    from pandas.core.internals.construction import to_arrays
    from pandas.core.sorting import lexsort_indexer

    arrays, _ = to_arrays(values, None)
    indexer = lexsort_indexer(arrays, orders=True)
    return values[indexer]


def union_with_duplicates(
    lvals: ArrayLike | Index, rvals: ArrayLike | Index
) -> ArrayLike | Index:
    """
    Extracts the union from lvals and rvals with respect to duplicates and nans in
    both arrays.

    Parameters
    ----------
    lvals : np.ndarray or ExtensionArray
        left values, ordered in front.
    rvals : np.ndarray or ExtensionArray
        right values, ordered after lvals.

    Returns
    -------
    np.ndarray or ExtensionArray
        Containing the unsorted union of both arrays.

    Notes
    -----
    Caller is responsible for ensuring lvals.dtype == rvals.dtype.
    """
    from pandas import Series

    with warnings.catch_warnings():
        # filter warning from object dtype inference; we will end up discarding
        # the index here, so the deprecation does not affect the end result here.
        warnings.filterwarnings(
            "ignore",
            "The behavior of value_counts with object-dtype is deprecated",
            category=FutureWarning,
        )
        l_count = value_counts_internal(lvals, dropna=False)
        r_count = value_counts_internal(rvals, dropna=False)
    l_count, r_count = l_count.align(r_count, fill_value=0)
    final_count = np.maximum(l_count.values, r_count.values)
    final_count = Series(final_count, index=l_count.index, dtype="int", copy=False)
    if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex):
        unique_vals = lvals.append(rvals).unique()
    else:
        if isinstance(lvals, ABCIndex):
            lvals = lvals._values
        if isinstance(rvals, ABCIndex):
            rvals = rvals._values
        # error: List item 0 has incompatible type "Union[ExtensionArray,
        # ndarray[Any, Any], Index]"; expected "Union[ExtensionArray,
        # ndarray[Any, Any]]"
        combined = concat_compat([lvals, rvals])  # type: ignore[list-item]
        unique_vals = unique(combined)
        unique_vals = ensure_wrapped_if_datetimelike(unique_vals)
    repeats = final_count.reindex(unique_vals).values
    return np.repeat(unique_vals, repeats)


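# A minimal usage sketch (not part of the pandas source): each value is kept
# as many times as its maximum count on either side:
#
#     >>> import numpy as np
#     >>> from pandas.core.algorithms import union_with_duplicates
#     >>> union_with_duplicates(np.array([1, 1, 2]), np.array([2, 2, 3]))
#     array([1, 1, 2, 2, 3])
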
def map_array(
    arr: ArrayLike,
    mapper,
    na_action: Literal["ignore"] | None = None,
    convert: bool = True,
) -> np.ndarray | ExtensionArray | Index:
    """
    Map values using an input mapping or function.

    Parameters
    ----------
    mapper : function, dict, or Series
        Mapping correspondence.
    na_action : {None, 'ignore'}, default None
        If 'ignore', propagate NA values, without passing them to the
        mapping correspondence.
    convert : bool, default True
        Try to find better dtype for elementwise function results. If
        False, leave as dtype=object.

    Returns
    -------
    Union[ndarray, Index, ExtensionArray]
        The output of the mapping function applied to the array.
        If the function returns a tuple with more than one element
        a MultiIndex will be returned.
    """
    if na_action not in (None, "ignore"):
        msg = f"na_action must either be 'ignore' or None, {na_action} was passed"
        raise ValueError(msg)

    # we can fastpath dict/Series to an efficient map
    # as we know that we are not going to have to yield
    # python types
    if is_dict_like(mapper):
        if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
            # If a dictionary subclass defines a default value method,
            # convert mapper to a lookup function (GH #15999).
            dict_with_default = mapper
            mapper = lambda x: dict_with_default[
                np.nan if isinstance(x, float) and np.isnan(x) else x
            ]
        else:
            # Dictionary does not have a default. Thus it's safe to
            # convert to a Series for efficiency.
            # we specify the keys here to handle the
            # possibility that they are tuples

            # The return value of mapping with an empty mapper is
            # expected to be pd.Series(np.nan, ...). As np.nan is
            # of dtype float64 the return value of this method should
            # be float64 as well
            from pandas import Series

            if len(mapper) == 0:
                mapper = Series(mapper, dtype=np.float64)
            else:
                mapper = Series(mapper)

    if isinstance(mapper, ABCSeries):
        if na_action == "ignore":
            mapper = mapper[mapper.index.notna()]

        # Since values were input this means we came from either
        # a dict or a series and mapper should be an index
        indexer = mapper.index.get_indexer(arr)
        new_values = take_nd(mapper._values, indexer)

        return new_values

    if not len(arr):
        return arr.copy()

    # we must convert to python types
    values = arr.astype(object, copy=False)
    if na_action is None:
        return lib.map_infer(values, mapper, convert=convert)
    else:
        return lib.map_infer_mask(
            values, mapper, mask=isna(values).view(np.uint8), convert=convert
        )
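# A minimal usage sketch (not part of the pandas source): a dict mapper takes
# the Series fastpath above, and unmatched values become NaN:
#
#     >>> import numpy as np
#     >>> from pandas.core.algorithms import map_array
#     >>> map_array(np.array(["cat", "dog"], dtype=object), {"cat": "kitten"})
#     array(['kitten', nan], dtype=object)
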
llmeval-env/lib/python3.10/site-packages/pandas/core/api.py
ADDED
@@ -0,0 +1,140 @@
from pandas._libs import (
    NaT,
    Period,
    Timedelta,
    Timestamp,
)
from pandas._libs.missing import NA

from pandas.core.dtypes.dtypes import (
    ArrowDtype,
    CategoricalDtype,
    DatetimeTZDtype,
    IntervalDtype,
    PeriodDtype,
)
from pandas.core.dtypes.missing import (
    isna,
    isnull,
    notna,
    notnull,
)

from pandas.core.algorithms import (
    factorize,
    unique,
    value_counts,
)
from pandas.core.arrays import Categorical
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import (
    Float32Dtype,
    Float64Dtype,
)
from pandas.core.arrays.integer import (
    Int8Dtype,
    Int16Dtype,
    Int32Dtype,
    Int64Dtype,
    UInt8Dtype,
    UInt16Dtype,
    UInt32Dtype,
    UInt64Dtype,
)
from pandas.core.arrays.string_ import StringDtype
from pandas.core.construction import array
from pandas.core.flags import Flags
from pandas.core.groupby import (
    Grouper,
    NamedAgg,
)
from pandas.core.indexes.api import (
    CategoricalIndex,
    DatetimeIndex,
    Index,
    IntervalIndex,
    MultiIndex,
    PeriodIndex,
    RangeIndex,
    TimedeltaIndex,
)
from pandas.core.indexes.datetimes import (
    bdate_range,
    date_range,
)
from pandas.core.indexes.interval import (
    Interval,
    interval_range,
)
from pandas.core.indexes.period import period_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexing import IndexSlice
from pandas.core.series import Series
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.numeric import to_numeric
from pandas.core.tools.timedeltas import to_timedelta

from pandas.io.formats.format import set_eng_float_format
from pandas.tseries.offsets import DateOffset

# DataFrame needs to be imported after NamedAgg to avoid a circular import
from pandas.core.frame import DataFrame  # isort:skip

__all__ = [
    "array",
    "ArrowDtype",
    "bdate_range",
    "BooleanDtype",
    "Categorical",
    "CategoricalDtype",
    "CategoricalIndex",
    "DataFrame",
    "DateOffset",
    "date_range",
    "DatetimeIndex",
    "DatetimeTZDtype",
    "factorize",
    "Flags",
    "Float32Dtype",
    "Float64Dtype",
    "Grouper",
    "Index",
    "IndexSlice",
    "Int16Dtype",
    "Int32Dtype",
    "Int64Dtype",
    "Int8Dtype",
    "Interval",
    "IntervalDtype",
    "IntervalIndex",
    "interval_range",
    "isna",
    "isnull",
    "MultiIndex",
    "NA",
    "NamedAgg",
    "NaT",
    "notna",
    "notnull",
    "Period",
    "PeriodDtype",
    "PeriodIndex",
    "period_range",
    "RangeIndex",
    "Series",
    "set_eng_float_format",
    "StringDtype",
    "Timedelta",
    "TimedeltaIndex",
    "timedelta_range",
    "Timestamp",
    "to_datetime",
    "to_numeric",
    "to_timedelta",
    "UInt16Dtype",
    "UInt32Dtype",
    "UInt64Dtype",
    "UInt8Dtype",
    "unique",
    "value_counts",
]
llmeval-env/lib/python3.10/site-packages/pandas/core/apply.py
ADDED
@@ -0,0 +1,2062 @@
from __future__ import annotations

import abc
from collections import defaultdict
import functools
from functools import partial
import inspect
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    cast,
)
import warnings

import numpy as np

from pandas._config import option_context

from pandas._libs import lib
from pandas._libs.internals import BlockValuesRefs
from pandas._typing import (
    AggFuncType,
    AggFuncTypeBase,
    AggFuncTypeDict,
    AggObjType,
    Axis,
    AxisInt,
    NDFrameT,
    npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import SpecificationError
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
    is_dict_like,
    is_extension_array_dtype,
    is_list_like,
    is_numeric_dtype,
    is_sequence,
)
from pandas.core.dtypes.dtypes import (
    CategoricalDtype,
    ExtensionDtype,
)
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCNDFrame,
    ABCSeries,
)

from pandas.core._numba.executor import generate_apply_looper
import pandas.core.common as com
from pandas.core.construction import ensure_wrapped_if_datetimelike

if TYPE_CHECKING:
    from collections.abc import (
        Generator,
        Hashable,
        Iterable,
        MutableMapping,
        Sequence,
    )

    from pandas import (
        DataFrame,
        Index,
        Series,
    )
    from pandas.core.groupby import GroupBy
    from pandas.core.resample import Resampler
    from pandas.core.window.rolling import BaseWindow


ResType = dict[int, Any]


def frame_apply(
    obj: DataFrame,
    func: AggFuncType,
    axis: Axis = 0,
    raw: bool = False,
    result_type: str | None = None,
    by_row: Literal[False, "compat"] = "compat",
    engine: str = "python",
    engine_kwargs: dict[str, bool] | None = None,
    args=None,
    kwargs=None,
) -> FrameApply:
    """construct and return a row or column based frame apply object"""
    axis = obj._get_axis_number(axis)
    klass: type[FrameApply]
    if axis == 0:
        klass = FrameRowApply
    elif axis == 1:
        klass = FrameColumnApply

    _, func, _, _ = reconstruct_func(func, **kwargs)
    assert func is not None

    return klass(
        obj,
        func,
        raw=raw,
        result_type=result_type,
        by_row=by_row,
        engine=engine,
        engine_kwargs=engine_kwargs,
        args=args,
        kwargs=kwargs,
    )


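# A minimal usage sketch (not part of the pandas source): DataFrame.apply
# routes through frame_apply, picking FrameRowApply for axis=0 and
# FrameColumnApply for axis=1:
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#     >>> df.apply(sum)          # axis=0 -> FrameRowApply
#     a    3
#     b    7
#     dtype: int64
#     >>> df.apply(sum, axis=1)  # axis=1 -> FrameColumnApply
#     0    4
#     1    6
#     dtype: int64
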
class Apply(metaclass=abc.ABCMeta):
    axis: AxisInt

    def __init__(
        self,
        obj: AggObjType,
        func: AggFuncType,
        raw: bool,
        result_type: str | None,
        *,
        by_row: Literal[False, "compat", "_compat"] = "compat",
        engine: str = "python",
        engine_kwargs: dict[str, bool] | None = None,
        args,
        kwargs,
    ) -> None:
        self.obj = obj
        self.raw = raw

        assert by_row is False or by_row in ["compat", "_compat"]
        self.by_row = by_row

        self.args = args or ()
        self.kwargs = kwargs or {}

        self.engine = engine
        self.engine_kwargs = {} if engine_kwargs is None else engine_kwargs

        if result_type not in [None, "reduce", "broadcast", "expand"]:
            raise ValueError(
                "invalid value for result_type, must be one "
                "of {None, 'reduce', 'broadcast', 'expand'}"
            )

        self.result_type = result_type

        self.func = func

    @abc.abstractmethod
    def apply(self) -> DataFrame | Series:
        pass

    @abc.abstractmethod
    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        pass

    @abc.abstractmethod
    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        pass

    def agg(self) -> DataFrame | Series | None:
        """
        Provide an implementation for the aggregators.

        Returns
        -------
        Result of aggregation, or None if agg cannot be performed by
        this method.
        """
        obj = self.obj
        func = self.func
        args = self.args
        kwargs = self.kwargs

        if isinstance(func, str):
            return self.apply_str()

        if is_dict_like(func):
            return self.agg_dict_like()
        elif is_list_like(func):
            # we require a list, but not a 'str'
            return self.agg_list_like()

        if callable(func):
            f = com.get_cython_func(func)
            if f and not args and not kwargs:
                warn_alias_replacement(obj, func, f)
                return getattr(obj, f)()

        # caller can react
        return None

    def transform(self) -> DataFrame | Series:
        """
        Transform a DataFrame or Series.

        Returns
        -------
        DataFrame or Series
            Result of applying ``func`` along the given axis of the
            Series or DataFrame.

        Raises
        ------
        ValueError
            If the transform function fails or does not transform.
        """
        obj = self.obj
        func = self.func
        axis = self.axis
        args = self.args
        kwargs = self.kwargs

        is_series = obj.ndim == 1

        if obj._get_axis_number(axis) == 1:
            assert not is_series
            return obj.T.transform(func, 0, *args, **kwargs).T

        if is_list_like(func) and not is_dict_like(func):
            func = cast(list[AggFuncTypeBase], func)
            # Convert func into an equivalent dict
            if is_series:
                func = {com.get_callable_name(v) or v: v for v in func}
            else:
                func = {col: func for col in obj}

        if is_dict_like(func):
            func = cast(AggFuncTypeDict, func)
            return self.transform_dict_like(func)

        # func is either str or callable
        func = cast(AggFuncTypeBase, func)
        try:
            result = self.transform_str_or_callable(func)
        except TypeError:
            raise
        except Exception as err:
            raise ValueError("Transform function failed") from err

        # Functions that transform may return empty Series/DataFrame
        # when the dtype is not appropriate
        if (
            isinstance(result, (ABCSeries, ABCDataFrame))
            and result.empty
            and not obj.empty
        ):
            raise ValueError("Transform function failed")
        # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type
        # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,
        # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame,
        # Series]"
        if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
            obj.index  # type: ignore[arg-type]
        ):
            raise ValueError("Function did not transform")

        return result

    def transform_dict_like(self, func) -> DataFrame:
        """
        Compute transform in the case of a dict-like func
        """
        from pandas.core.reshape.concat import concat

        obj = self.obj
        args = self.args
        kwargs = self.kwargs

        # transform is currently only for Series/DataFrame
        assert isinstance(obj, ABCNDFrame)

        if len(func) == 0:
            raise ValueError("No transform functions were provided")

        func = self.normalize_dictlike_arg("transform", obj, func)

        results: dict[Hashable, DataFrame | Series] = {}
        for name, how in func.items():
            colg = obj._gotitem(name, ndim=1)
            results[name] = colg.transform(how, 0, *args, **kwargs)
        return concat(results, axis=1)

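    # A minimal usage sketch (not part of the pandas source): a dict-like
    # func transforms each named column separately and concatenates results:
    #
    #     >>> import numpy as np
    #     >>> import pandas as pd
    #     >>> df = pd.DataFrame({"a": [1, 4], "b": [9, 16]})
    #     >>> df.transform({"a": np.sqrt, "b": np.sqrt})
    #          a    b
    #     0  1.0  3.0
    #     1  2.0  4.0
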
def transform_str_or_callable(self, func) -> DataFrame | Series:
|
296 |
+
"""
|
297 |
+
Compute transform in the case of a string or callable func
|
298 |
+
"""
|
299 |
+
obj = self.obj
|
300 |
+
args = self.args
|
301 |
+
kwargs = self.kwargs
|
302 |
+
|
303 |
+
if isinstance(func, str):
|
304 |
+
return self._apply_str(obj, func, *args, **kwargs)
|
305 |
+
|
306 |
+
if not args and not kwargs:
|
307 |
+
f = com.get_cython_func(func)
|
308 |
+
if f:
|
309 |
+
warn_alias_replacement(obj, func, f)
|
310 |
+
return getattr(obj, f)()
|
311 |
+
|
312 |
+
# Two possible ways to use a UDF - apply or call directly
|
313 |
+
try:
|
314 |
+
return obj.apply(func, args=args, **kwargs)
|
315 |
+
except Exception:
|
316 |
+
return func(obj, *args, **kwargs)
|
317 |
+
|
318 |
+
def agg_list_like(self) -> DataFrame | Series:
|
319 |
+
"""
|
320 |
+
Compute aggregation in the case of a list-like argument.
|
321 |
+
|
322 |
+
Returns
|
323 |
+
-------
|
324 |
+
Result of aggregation.
|
325 |
+
"""
|
326 |
+
return self.agg_or_apply_list_like(op_name="agg")
|
327 |
+
|
328 |
+
def compute_list_like(
|
329 |
+
self,
|
330 |
+
op_name: Literal["agg", "apply"],
|
331 |
+
selected_obj: Series | DataFrame,
|
332 |
+
kwargs: dict[str, Any],
|
333 |
+
) -> tuple[list[Hashable] | Index, list[Any]]:
|
334 |
+
"""
|
335 |
+
Compute agg/apply results for like-like input.
|
336 |
+
|
337 |
+
Parameters
|
338 |
+
----------
|
339 |
+
op_name : {"agg", "apply"}
|
340 |
+
Operation being performed.
|
341 |
+
selected_obj : Series or DataFrame
|
342 |
+
Data to perform operation on.
|
343 |
+
kwargs : dict
|
344 |
+
Keyword arguments to pass to the functions.
|
345 |
+
|
346 |
+
Returns
|
347 |
+
-------
|
348 |
+
keys : list[Hashable] or Index
|
349 |
+
Index labels for result.
|
350 |
+
results : list
|
351 |
+
Data for result. When aggregating with a Series, this can contain any
|
352 |
+
Python objects.
|
353 |
+
"""
|
354 |
+
func = cast(list[AggFuncTypeBase], self.func)
|
355 |
+
obj = self.obj
|
356 |
+
|
357 |
+
results = []
|
358 |
+
keys = []
|
359 |
+
|
360 |
+
# degenerate case
|
361 |
+
if selected_obj.ndim == 1:
|
362 |
+
for a in func:
|
363 |
+
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
|
364 |
+
args = (
|
365 |
+
[self.axis, *self.args]
|
366 |
+
if include_axis(op_name, colg)
|
367 |
+
else self.args
|
368 |
+
)
|
369 |
+
new_res = getattr(colg, op_name)(a, *args, **kwargs)
|
370 |
+
results.append(new_res)
|
371 |
+
|
372 |
+
# make sure we find a good name
|
373 |
+
name = com.get_callable_name(a) or a
|
374 |
+
keys.append(name)
|
375 |
+
|
376 |
+
else:
|
377 |
+
indices = []
|
378 |
+
for index, col in enumerate(selected_obj):
|
379 |
+
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
|
380 |
+
args = (
|
381 |
+
[self.axis, *self.args]
|
382 |
+
if include_axis(op_name, colg)
|
383 |
+
else self.args
|
384 |
+
)
|
385 |
+
new_res = getattr(colg, op_name)(func, *args, **kwargs)
|
386 |
+
results.append(new_res)
|
387 |
+
indices.append(index)
|
388 |
+
# error: Incompatible types in assignment (expression has type "Any |
|
389 |
+
# Index", variable has type "list[Any | Callable[..., Any] | str]")
|
390 |
+
keys = selected_obj.columns.take(indices) # type: ignore[assignment]
|
391 |
+
|
392 |
+
return keys, results
|
393 |
+
|
394 |
+
def wrap_results_list_like(
|
395 |
+
self, keys: Iterable[Hashable], results: list[Series | DataFrame]
|
396 |
+
):
|
397 |
+
from pandas.core.reshape.concat import concat
|
398 |
+
|
399 |
+
obj = self.obj
|
400 |
+
|
401 |
+
try:
|
402 |
+
return concat(results, keys=keys, axis=1, sort=False)
|
403 |
+
except TypeError as err:
|
404 |
+
# we are concatting non-NDFrame objects,
|
405 |
+
# e.g. a list of scalars
|
406 |
+
from pandas import Series
|
407 |
+
|
408 |
+
result = Series(results, index=keys, name=obj.name)
|
409 |
+
if is_nested_object(result):
|
410 |
+
raise ValueError(
|
411 |
+
"cannot combine transform and aggregation operations"
|
412 |
+
) from err
|
413 |
+
return result
|
414 |
+
|
415 |
+
def agg_dict_like(self) -> DataFrame | Series:
|
416 |
+
"""
|
417 |
+
Compute aggregation in the case of a dict-like argument.
|
418 |
+
|
419 |
+
Returns
|
420 |
+
-------
|
421 |
+
Result of aggregation.
|
422 |
+
"""
|
423 |
+
return self.agg_or_apply_dict_like(op_name="agg")
|
424 |
+
|
425 |
+
def compute_dict_like(
|
426 |
+
self,
|
427 |
+
op_name: Literal["agg", "apply"],
|
428 |
+
selected_obj: Series | DataFrame,
|
429 |
+
selection: Hashable | Sequence[Hashable],
|
430 |
+
kwargs: dict[str, Any],
|
431 |
+
) -> tuple[list[Hashable], list[Any]]:
|
432 |
+
"""
|
433 |
+
Compute agg/apply results for dict-like input.
|
434 |
+
|
435 |
+
Parameters
|
436 |
+
----------
|
437 |
+
op_name : {"agg", "apply"}
|
438 |
+
Operation being performed.
|
439 |
+
selected_obj : Series or DataFrame
|
440 |
+
Data to perform operation on.
|
441 |
+
selection : hashable or sequence of hashables
|
442 |
+
Used by GroupBy, Window, and Resample if selection is applied to the object.
|
443 |
+
kwargs : dict
|
444 |
+
Keyword arguments to pass to the functions.
|
445 |
+
|
446 |
+
Returns
|
447 |
+
-------
|
448 |
+
keys : list[hashable]
|
449 |
+
Index labels for result.
|
450 |
+
results : list
|
451 |
+
Data for result. When aggregating with a Series, this can contain any
|
452 |
+
Python object.
|
453 |
+
"""
|
454 |
+
from pandas.core.groupby.generic import (
|
455 |
+
DataFrameGroupBy,
|
456 |
+
SeriesGroupBy,
|
457 |
+
)
|
458 |
+
|
459 |
+
obj = self.obj
|
460 |
+
is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
|
461 |
+
func = cast(AggFuncTypeDict, self.func)
|
462 |
+
func = self.normalize_dictlike_arg(op_name, selected_obj, func)
|
463 |
+
|
464 |
+
is_non_unique_col = (
|
465 |
+
selected_obj.ndim == 2
|
466 |
+
and selected_obj.columns.nunique() < len(selected_obj.columns)
|
467 |
+
)
|
468 |
+
|
469 |
+
if selected_obj.ndim == 1:
|
470 |
+
# key only used for output
|
471 |
+
colg = obj._gotitem(selection, ndim=1)
|
472 |
+
results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()]
|
473 |
+
keys = list(func.keys())
|
474 |
+
elif not is_groupby and is_non_unique_col:
|
475 |
+
# key used for column selection and output
|
476 |
+
# GH#51099
|
477 |
+
results = []
|
478 |
+
keys = []
|
479 |
+
for key, how in func.items():
|
480 |
+
indices = selected_obj.columns.get_indexer_for([key])
|
481 |
+
labels = selected_obj.columns.take(indices)
|
482 |
+
label_to_indices = defaultdict(list)
|
483 |
+
for index, label in zip(indices, labels):
|
484 |
+
label_to_indices[label].append(index)
|
485 |
+
|
486 |
+
key_data = [
|
487 |
+
getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs)
|
488 |
+
for label, indices in label_to_indices.items()
|
489 |
+
for indice in indices
|
490 |
+
]
|
491 |
+
|
492 |
+
keys += [key] * len(key_data)
|
493 |
+
results += key_data
|
494 |
+
else:
|
495 |
+
# key used for column selection and output
|
496 |
+
results = [
|
497 |
+
getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs)
|
498 |
+
for key, how in func.items()
|
499 |
+
]
|
500 |
+
keys = list(func.keys())
|
501 |
+
|
502 |
+
return keys, results
|
503 |
+
|
504 |
+
def wrap_results_dict_like(
|
505 |
+
self,
|
506 |
+
selected_obj: Series | DataFrame,
|
507 |
+
result_index: list[Hashable],
|
508 |
+
result_data: list,
|
509 |
+
):
|
510 |
+
from pandas import Index
|
511 |
+
from pandas.core.reshape.concat import concat
|
512 |
+
|
513 |
+
obj = self.obj
|
514 |
+
|
515 |
+
# Avoid making two isinstance calls in all and any below
|
516 |
+
is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data]
|
517 |
+
|
518 |
+
if all(is_ndframe):
|
519 |
+
results = dict(zip(result_index, result_data))
|
520 |
+
keys_to_use: Iterable[Hashable]
|
521 |
+
keys_to_use = [k for k in result_index if not results[k].empty]
|
522 |
+
# Have to check, if at least one DataFrame is not empty.
|
523 |
+
keys_to_use = keys_to_use if keys_to_use != [] else result_index
|
524 |
+
if selected_obj.ndim == 2:
|
525 |
+
# keys are columns, so we can preserve names
|
526 |
+
ktu = Index(keys_to_use)
|
527 |
+
ktu._set_names(selected_obj.columns.names)
|
528 |
+
keys_to_use = ktu
|
529 |
+
|
530 |
+
axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1
|
531 |
+
result = concat(
|
532 |
+
{k: results[k] for k in keys_to_use},
|
533 |
+
axis=axis,
|
534 |
+
keys=keys_to_use,
|
535 |
+
)
|
536 |
+
elif any(is_ndframe):
|
537 |
+
# There is a mix of NDFrames and scalars
|
538 |
+
raise ValueError(
|
539 |
+
"cannot perform both aggregation "
|
540 |
+
"and transformation operations "
|
541 |
+
"simultaneously"
|
542 |
+
)
|
543 |
+
else:
|
544 |
+
from pandas import Series
|
545 |
+
|
546 |
+
# we have a list of scalars
|
547 |
+
# GH 36212 use name only if obj is a series
|
548 |
+
if obj.ndim == 1:
|
549 |
+
obj = cast("Series", obj)
|
550 |
+
name = obj.name
|
551 |
+
else:
|
552 |
+
name = None
|
553 |
+
|
554 |
+
result = Series(result_data, index=result_index, name=name)
|
555 |
+
|
556 |
+
return result
|
557 |
+
|
558 |
+
def apply_str(self) -> DataFrame | Series:
|
559 |
+
"""
|
560 |
+
Compute apply in case of a string.
|
561 |
+
|
562 |
+
Returns
|
563 |
+
-------
|
564 |
+
result: Series or DataFrame
|
565 |
+
"""
|
566 |
+
# Caller is responsible for checking isinstance(self.f, str)
|
567 |
+
func = cast(str, self.func)
|
568 |
+
|
569 |
+
obj = self.obj
|
570 |
+
|
571 |
+
from pandas.core.groupby.generic import (
|
572 |
+
DataFrameGroupBy,
|
573 |
+
SeriesGroupBy,
|
574 |
+
)
|
575 |
+
|
576 |
+
# Support for `frame.transform('method')`
|
577 |
+
# Some methods (shift, etc.) require the axis argument, others
|
578 |
+
# don't, so inspect and insert if necessary.
|
579 |
+
method = getattr(obj, func, None)
|
580 |
+
if callable(method):
|
581 |
+
sig = inspect.getfullargspec(method)
|
582 |
+
arg_names = (*sig.args, *sig.kwonlyargs)
|
583 |
+
if self.axis != 0 and (
|
584 |
+
"axis" not in arg_names or func in ("corrwith", "skew")
|
585 |
+
):
|
586 |
+
raise ValueError(f"Operation {func} does not support axis=1")
|
587 |
+
if "axis" in arg_names:
|
588 |
+
if isinstance(obj, (SeriesGroupBy, DataFrameGroupBy)):
|
589 |
+
# Try to avoid FutureWarning for deprecated axis keyword;
|
590 |
+
# If self.axis matches the axis we would get by not passing
|
591 |
+
# axis, we safely exclude the keyword.
|
592 |
+
|
593 |
+
default_axis = 0
|
594 |
+
if func in ["idxmax", "idxmin"]:
|
595 |
+
# DataFrameGroupBy.idxmax, idxmin axis defaults to self.axis,
|
596 |
+
# whereas other axis keywords default to 0
|
597 |
+
default_axis = self.obj.axis
|
598 |
+
|
599 |
+
if default_axis != self.axis:
|
600 |
+
self.kwargs["axis"] = self.axis
|
601 |
+
else:
|
602 |
+
self.kwargs["axis"] = self.axis
|
603 |
+
return self._apply_str(obj, func, *self.args, **self.kwargs)
|
604 |
+
|
605 |
+
def apply_list_or_dict_like(self) -> DataFrame | Series:
|
606 |
+
"""
|
607 |
+
Compute apply in case of a list-like or dict-like.
|
608 |
+
|
609 |
+
Returns
|
610 |
+
-------
|
611 |
+
result: Series, DataFrame, or None
|
612 |
+
Result when self.func is a list-like or dict-like, None otherwise.
|
613 |
+
"""
|
614 |
+
|
615 |
+
if self.engine == "numba":
|
616 |
+
raise NotImplementedError(
|
617 |
+
"The 'numba' engine doesn't support list-like/"
|
618 |
+
"dict likes of callables yet."
|
619 |
+
)
|
620 |
+
|
621 |
+
if self.axis == 1 and isinstance(self.obj, ABCDataFrame):
|
622 |
+
return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T
|
623 |
+
|
624 |
+
func = self.func
|
625 |
+
kwargs = self.kwargs
|
626 |
+
|
627 |
+
if is_dict_like(func):
|
628 |
+
result = self.agg_or_apply_dict_like(op_name="apply")
|
629 |
+
else:
|
630 |
+
result = self.agg_or_apply_list_like(op_name="apply")
|
631 |
+
|
632 |
+
result = reconstruct_and_relabel_result(result, func, **kwargs)
|
633 |
+
|
634 |
+
return result
|
635 |
+

    def normalize_dictlike_arg(
        self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict
    ) -> AggFuncTypeDict:
        """
        Handler for dict-like argument.

        Ensures that necessary columns exist if obj is a DataFrame, and
        that a nested renamer is not passed. Also normalizes to all lists
        when values consist of a mix of list and non-lists.
        """
        assert how in ("apply", "agg", "transform")

        # Can't use func.values(); wouldn't work for a Series
        if (
            how == "agg"
            and isinstance(obj, ABCSeries)
            and any(is_list_like(v) for _, v in func.items())
        ) or (any(is_dict_like(v) for _, v in func.items())):
            # GH 15931 - deprecation of renaming keys
            raise SpecificationError("nested renamer is not supported")

        if obj.ndim != 1:
            # Check for missing columns on a frame
            from pandas import Index

            cols = Index(list(func.keys())).difference(obj.columns, sort=True)
            if len(cols) > 0:
                raise KeyError(f"Column(s) {list(cols)} do not exist")

        aggregator_types = (list, tuple, dict)

        # if we have a dict of any non-scalars
        # eg. {'A' : ['mean']}, normalize all to
        # be list-likes
        # Cannot use func.values() because arg may be a Series
        if any(isinstance(x, aggregator_types) for _, x in func.items()):
            new_func: AggFuncTypeDict = {}
            for k, v in func.items():
                if not isinstance(v, aggregator_types):
                    new_func[k] = [v]
                else:
                    new_func[k] = v
            func = new_func
        return func
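
    # Sketch of the normalization and the nested-renamer guard above:
    #
    #   >>> import pandas as pd
    #   >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    #   >>> df.agg({"A": "sum", "B": ["min", "max"]})  # scalar spec becomes a list
    #          A    B
    #   sum  3.0  NaN
    #   min  NaN  3.0
    #   max  NaN  4.0
    #   >>> df.agg({"A": {"foo": "sum"}})  # nested dict -> SpecificationError
    #   Traceback (most recent call last):
    #   ...
    #   pandas.errors.SpecificationError: nested renamer is not supported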

    def _apply_str(self, obj, func: str, *args, **kwargs):
        """
        if arg is a string, then try to operate on it:
        - try to find a function (or attribute) on obj
        - try to find a numpy function
        - raise
        """
        assert isinstance(func, str)

        if hasattr(obj, func):
            f = getattr(obj, func)
            if callable(f):
                return f(*args, **kwargs)

            # people may aggregate on a non-callable attribute
            # but don't let them think they can pass args to it
            assert len(args) == 0
            assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
            return f
        elif hasattr(np, func) and hasattr(obj, "__array__"):
            # in particular exclude Window
            f = getattr(np, func)
            return f(obj, *args, **kwargs)
        else:
            msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object"
            raise AttributeError(msg)
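
    # The branches above, exercised through Series.agg (a sketch):
    #
    #   >>> import pandas as pd
    #   >>> s = pd.Series([1, 2, 3])
    #   >>> s.agg("sum")         # found as a Series method -> called
    #   6
    #   >>> s.agg("not_a_func")  # neither a method nor a numpy function
    #   Traceback (most recent call last):
    #   ...
    #   AttributeError: 'not_a_func' is not a valid function for 'Series' object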


class NDFrameApply(Apply):
    """
    Methods shared by FrameApply and SeriesApply but
    not GroupByApply or ResamplerWindowApply
    """

    obj: DataFrame | Series

    @property
    def index(self) -> Index:
        return self.obj.index

    @property
    def agg_axis(self) -> Index:
        return self.obj._get_agg_axis(self.axis)

    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        obj = self.obj
        kwargs = self.kwargs

        if op_name == "apply":
            if isinstance(self, FrameApply):
                by_row = self.by_row

            elif isinstance(self, SeriesApply):
                by_row = "_compat" if self.by_row else False
            else:
                by_row = False
            kwargs = {**kwargs, "by_row": by_row}

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        keys, results = self.compute_list_like(op_name, obj, kwargs)
        result = self.wrap_results_list_like(keys, results)
        return result

    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        assert op_name in ["agg", "apply"]
        obj = self.obj

        kwargs = {}
        if op_name == "apply":
            by_row = "_compat" if self.by_row else False
            kwargs.update({"by_row": by_row})

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        selection = None
        result_index, result_data = self.compute_dict_like(
            op_name, obj, selection, kwargs
        )
        result = self.wrap_results_dict_like(obj, result_index, result_data)
        return result


class FrameApply(NDFrameApply):
    obj: DataFrame

    def __init__(
        self,
        obj: AggObjType,
        func: AggFuncType,
        raw: bool,
        result_type: str | None,
        *,
        by_row: Literal[False, "compat"] = False,
        engine: str = "python",
        engine_kwargs: dict[str, bool] | None = None,
        args,
        kwargs,
    ) -> None:
        if by_row is not False and by_row != "compat":
            raise ValueError(f"by_row={by_row} not allowed")
        super().__init__(
            obj,
            func,
            raw,
            result_type,
            by_row=by_row,
            engine=engine,
            engine_kwargs=engine_kwargs,
            args=args,
            kwargs=kwargs,
        )

    # ---------------------------------------------------------------
    # Abstract Methods

    @property
    @abc.abstractmethod
    def result_index(self) -> Index:
        pass

    @property
    @abc.abstractmethod
    def result_columns(self) -> Index:
        pass

    @property
    @abc.abstractmethod
    def series_generator(self) -> Generator[Series, None, None]:
        pass

    @staticmethod
    @functools.cache
    @abc.abstractmethod
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        pass

    @abc.abstractmethod
    def apply_with_numba(self):
        pass

    def validate_values_for_numba(self):
        # Validate that column dtypes are all OK
        for colname, dtype in self.obj.dtypes.items():
            if not is_numeric_dtype(dtype):
                raise ValueError(
                    f"Column {colname} must have a numeric dtype. "
                    f"Found '{dtype}' instead"
                )
            if is_extension_array_dtype(dtype):
                raise ValueError(
                    f"Column {colname} is backed by an extension array, "
                    f"which is not supported by the numba engine."
                )

    @abc.abstractmethod
    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        pass

    # ---------------------------------------------------------------

    @property
    def res_columns(self) -> Index:
        return self.result_columns

    @property
    def columns(self) -> Index:
        return self.obj.columns

    @cache_readonly
    def values(self):
        return self.obj.values

    def apply(self) -> DataFrame | Series:
        """compute the results"""

        # dispatch to handle list-like or dict-like
        if is_list_like(self.func):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support lists of callables yet"
                )
            return self.apply_list_or_dict_like()

        # all empty
        if len(self.columns) == 0 and len(self.index) == 0:
            return self.apply_empty_result()

        # string dispatch
        if isinstance(self.func, str):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support using "
                    "a string as the callable function"
                )
            return self.apply_str()

        # ufunc
        elif isinstance(self.func, np.ufunc):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support "
                    "using a numpy ufunc as the callable function"
                )
            with np.errstate(all="ignore"):
                results = self.obj._mgr.apply("apply", func=self.func)
            # _constructor will retain self.index and self.columns
            return self.obj._constructor_from_mgr(results, axes=results.axes)

        # broadcasting
        if self.result_type == "broadcast":
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support result_type='broadcast'"
                )
            return self.apply_broadcast(self.obj)

        # one axis empty
        elif not all(self.obj.shape):
            return self.apply_empty_result()

        # raw
        elif self.raw:
            return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs)

        return self.apply_standard()
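
    # The dispatch chain above, seen from DataFrame.apply (a sketch):
    #
    #   >>> import numpy as np
    #   >>> import pandas as pd
    #   >>> df = pd.DataFrame({"a": [1.0, 4.0]})
    #   >>> df.apply("sum")    # string -> apply_str
    #   a    5.0
    #   dtype: float64
    #   >>> df.apply(np.sqrt)  # ufunc -> applied blockwise, shape preserved
    #        a
    #   0  1.0
    #   1  2.0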

    def agg(self):
        obj = self.obj
        axis = self.axis

        # TODO: Avoid having to change state
        self.obj = self.obj if self.axis == 0 else self.obj.T
        self.axis = 0

        result = None
        try:
            result = super().agg()
        finally:
            self.obj = obj
            self.axis = axis

        if axis == 1:
            result = result.T if result is not None else result

        if result is None:
            result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs)

        return result

    def apply_empty_result(self):
        """
        we have an empty result; at least 1 axis is 0

        we will try to apply the function to an empty
        series in order to see if this is a reduction function
        """
        assert callable(self.func)

        # we are not asked to reduce or infer reduction
        # so just return a copy of the existing object
        if self.result_type not in ["reduce", None]:
            return self.obj.copy()

        # we may need to infer
        should_reduce = self.result_type == "reduce"

        from pandas import Series

        if not should_reduce:
            try:
                if self.axis == 0:
                    r = self.func(
                        Series([], dtype=np.float64), *self.args, **self.kwargs
                    )
                else:
                    r = self.func(
                        Series(index=self.columns, dtype=np.float64),
                        *self.args,
                        **self.kwargs,
                    )
            except Exception:
                pass
            else:
                should_reduce = not isinstance(r, Series)

        if should_reduce:
            if len(self.agg_axis):
                r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs)
            else:
                r = np.nan

            return self.obj._constructor_sliced(r, index=self.agg_axis)
        else:
            return self.obj.copy()
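
    # Reduction inference on an empty frame (a sketch): the func is tried on
    # an empty float64 Series; a non-Series result marks it as a reduction.
    #
    #   >>> import pandas as pd
    #   >>> pd.DataFrame(columns=["a", "b"]).apply(lambda s: s.sum())
    #   a    0.0
    #   b    0.0
    #   dtype: float64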

    def apply_raw(self, engine="python", engine_kwargs=None):
        """apply to the values as a numpy array"""

        def wrap_function(func):
            """
            Wrap user supplied function to work around numpy issue.

            see https://github.com/numpy/numpy/issues/8352
            """

            def wrapper(*args, **kwargs):
                result = func(*args, **kwargs)
                if isinstance(result, str):
                    result = np.array(result, dtype=object)
                return result

            return wrapper

        if engine == "numba":
            engine_kwargs = {} if engine_kwargs is None else engine_kwargs

            # error: Argument 1 to "__call__" of "_lru_cache_wrapper" has
            # incompatible type "Callable[..., Any] | str | list[Callable
            # [..., Any] | str] | dict[Hashable,Callable[..., Any] | str |
            # list[Callable[..., Any] | str]]"; expected "Hashable"
            nb_looper = generate_apply_looper(
                self.func, **engine_kwargs  # type: ignore[arg-type]
            )
            result = nb_looper(self.values, self.axis)
            # If we made the result 2-D, squeeze it back to 1-D
            result = np.squeeze(result)
        else:
            result = np.apply_along_axis(
                wrap_function(self.func),
                self.axis,
                self.values,
                *self.args,
                **self.kwargs,
            )

        # TODO: mixed type case
        if result.ndim == 2:
            return self.obj._constructor(result, index=self.index, columns=self.columns)
        else:
            return self.obj._constructor_sliced(result, index=self.agg_axis)
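
    # raw=True hands each column to the function as a plain ndarray (a sketch):
    #
    #   >>> import pandas as pd
    #   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 5]})
    #   >>> df.apply(lambda arr: arr.max() - arr.min(), raw=True)
    #   a    1
    #   b    2
    #   dtype: int64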

    def apply_broadcast(self, target: DataFrame) -> DataFrame:
        assert callable(self.func)

        result_values = np.empty_like(target.values)

        # axis which we want to compare compliance
        result_compare = target.shape[0]

        for i, col in enumerate(target.columns):
            res = self.func(target[col], *self.args, **self.kwargs)
            ares = np.asarray(res).ndim

            # must be a scalar or 1d
            if ares > 1:
                raise ValueError("too many dims to broadcast")
            if ares == 1:
                # must match return dim
                if result_compare != len(res):
                    raise ValueError("cannot broadcast result")

            result_values[:, i] = res

        # we *always* preserve the original index / columns
        result = self.obj._constructor(
            result_values, index=target.index, columns=target.columns
        )
        return result
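
    # result_type="broadcast" re-expands per-column results to the original
    # shape (a sketch):
    #
    #   >>> import pandas as pd
    #   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    #   >>> df.apply(lambda col: col.sum(), result_type="broadcast")
    #      a  b
    #   0  3  7
    #   1  3  7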

    def apply_standard(self):
        if self.engine == "python":
            results, res_index = self.apply_series_generator()
        else:
            results, res_index = self.apply_series_numba()

        # wrap results
        return self.wrap_results(results, res_index)

    def apply_series_generator(self) -> tuple[ResType, Index]:
        assert callable(self.func)

        series_gen = self.series_generator
        res_index = self.result_index

        results = {}

        with option_context("mode.chained_assignment", None):
            for i, v in enumerate(series_gen):
                # ignore SettingWithCopy here in case the user mutates
                results[i] = self.func(v, *self.args, **self.kwargs)
                if isinstance(results[i], ABCSeries):
                    # If we have a view on v, we need to make a copy because
                    # series_generator will swap out the underlying data
                    results[i] = results[i].copy(deep=False)

        return results, res_index

    def apply_series_numba(self):
        if self.engine_kwargs.get("parallel", False):
            raise NotImplementedError(
                "Parallel apply is not supported when raw=False and engine='numba'"
            )
        if not self.obj.index.is_unique or not self.columns.is_unique:
            raise NotImplementedError(
                "The index/columns must be unique when raw=False and engine='numba'"
            )
        self.validate_values_for_numba()
        results = self.apply_with_numba()
        return results, self.result_index

    def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series:
        from pandas import Series

        # see if we can infer the results
        if len(results) > 0 and 0 in results and is_sequence(results[0]):
            return self.wrap_results_for_axis(results, res_index)

        # dict of scalars

        # the default dtype of an empty Series is `object`, but this
        # code can be hit by df.mean() where the result should have dtype
        # float64 even if it's an empty Series.
        constructor_sliced = self.obj._constructor_sliced
        if len(results) == 0 and constructor_sliced is Series:
            result = constructor_sliced(results, dtype=np.float64)
        else:
            result = constructor_sliced(results)
        result.index = res_index

        return result
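
    # How wrap_results splits scalar vs. sequence results (a sketch):
    #
    #   >>> import pandas as pd
    #   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    #   >>> df.apply(lambda s: s.sum())             # scalars -> Series
    #   a    3
    #   b    7
    #   dtype: int64
    #   >>> df.apply(lambda s: [s.min(), s.max()])  # sequences -> DataFrame
    #      a  b
    #   0  1  3
    #   1  2  4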

    def apply_str(self) -> DataFrame | Series:
        # Caller is responsible for checking isinstance(self.func, str)
        # TODO: GH#39993 - Avoid special-casing by replacing with lambda
        if self.func == "size":
            # Special-cased because DataFrame.size returns a single scalar
            obj = self.obj
            value = obj.shape[self.axis]
            return obj._constructor_sliced(value, index=self.agg_axis)
        return super().apply_str()
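
    # The "size" special case above (a sketch): DataFrame.size is a single
    # scalar, so apply("size") instead reports the length along the axis.
    #
    #   >>> import pandas as pd
    #   >>> df = pd.DataFrame({"a": [1, 2, 3]})
    #   >>> df.size
    #   3
    #   >>> df.apply("size")
    #   a    3
    #   dtype: int64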


class FrameRowApply(FrameApply):
    axis: AxisInt = 0

    @property
    def series_generator(self) -> Generator[Series, None, None]:
        return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))

    @staticmethod
    @functools.cache
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        numba = import_optional_dependency("numba")
        from pandas import Series

        # Import helper from extensions to cast string object -> np strings
        # Note: This also has the side effect of loading our numba extensions
        from pandas.core._numba.extensions import maybe_cast_str

        jitted_udf = numba.extending.register_jitable(func)

        # Currently the parallel argument doesn't get passed through here
        # (it's disabled) since the dicts in numba aren't thread-safe.
        @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
        def numba_func(values, col_names, df_index):
            results = {}
            for j in range(values.shape[1]):
                # Create the series
                ser = Series(
                    values[:, j], index=df_index, name=maybe_cast_str(col_names[j])
                )
                results[j] = jitted_udf(ser)
            return results

        return numba_func

    def apply_with_numba(self) -> dict[int, Any]:
        nb_func = self.generate_numba_apply_func(
            cast(Callable, self.func), **self.engine_kwargs
        )
        from pandas.core._numba.extensions import set_numba_data

        index = self.obj.index
        if index.dtype == "string":
            index = index.astype(object)

        columns = self.obj.columns
        if columns.dtype == "string":
            columns = columns.astype(object)

        # Convert from numba dict to regular dict
        # Our isinstance checks in the df constructor don't pass for numba's typed dict
        with set_numba_data(index) as index, set_numba_data(columns) as columns:
            res = dict(nb_func(self.values, columns, index))
        return res

    @property
    def result_index(self) -> Index:
        return self.columns

    @property
    def result_columns(self) -> Index:
        return self.index

    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        """return the results for the rows"""

        if self.result_type == "reduce":
            # e.g. test_apply_dict GH#8735
            res = self.obj._constructor_sliced(results)
            res.index = res_index
            return res

        elif self.result_type is None and all(
            isinstance(x, dict) for x in results.values()
        ):
            # Our operation was a to_dict op e.g.
            # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
            res = self.obj._constructor_sliced(results)
            res.index = res_index
            return res

        try:
            result = self.obj._constructor(data=results)
        except ValueError as err:
            if "All arrays must be of the same length" in str(err):
                # e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
                # see test_agg_listlike_result GH#29587
                res = self.obj._constructor_sliced(results)
                res.index = res_index
                return res
            else:
                raise

        if not isinstance(results[0], ABCSeries):
            if len(result.index) == len(self.res_columns):
                result.index = self.res_columns

            if len(result.columns) == len(res_index):
                result.columns = res_index

        return result
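
# The to_dict special case in wrap_results_for_axis above (a sketch): when
# every column maps to a dict, the result stays a Series of dicts.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   >>> df.apply(lambda s: {"lo": s.min(), "hi": s.max()})
#   a    {'lo': 1, 'hi': 2}
#   b    {'lo': 3, 'hi': 4}
#   dtype: object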


class FrameColumnApply(FrameApply):
    axis: AxisInt = 1

    def apply_broadcast(self, target: DataFrame) -> DataFrame:
        result = super().apply_broadcast(target.T)
        return result.T

    @property
    def series_generator(self) -> Generator[Series, None, None]:
        values = self.values
        values = ensure_wrapped_if_datetimelike(values)
        assert len(values) > 0

        # We create one Series object, and will swap out the data inside
        # of it. Kids: don't do this at home.
        ser = self.obj._ixs(0, axis=0)
        mgr = ser._mgr

        is_view = mgr.blocks[0].refs.has_reference()  # type: ignore[union-attr]

        if isinstance(ser.dtype, ExtensionDtype):
            # values will be incorrect for this block
            # TODO(EA2D): special case would be unnecessary with 2D EAs
            obj = self.obj
            for i in range(len(obj)):
                yield obj._ixs(i, axis=0)

        else:
            for arr, name in zip(values, self.index):
                # GH#35462 re-pin mgr in case setitem changed it
                ser._mgr = mgr
                mgr.set_values(arr)
                object.__setattr__(ser, "_name", name)
                if not is_view:
                    # In apply_series_generator we store a shallow copy of the
                    # result, which potentially increases the ref count of this
                    # reused `ser` object (depending on the result of the applied
                    # function) -> if that happened and `ser` is already a copy,
                    # then we reset the refs here to avoid triggering an
                    # unnecessary CoW inside the applied function
                    # (https://github.com/pandas-dev/pandas/pull/56212)
                    mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0])  # type: ignore[union-attr]
                yield ser

    @staticmethod
    @functools.cache
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        numba = import_optional_dependency("numba")
        from pandas import Series
        from pandas.core._numba.extensions import maybe_cast_str

        jitted_udf = numba.extending.register_jitable(func)

        @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
        def numba_func(values, col_names_index, index):
            results = {}
            # Currently the parallel argument doesn't get passed through here
            # (it's disabled) since the dicts in numba aren't thread-safe.
            for i in range(values.shape[0]):
                # Create the series
                # TODO: values corrupted without the copy
                ser = Series(
                    values[i].copy(),
                    index=col_names_index,
                    name=maybe_cast_str(index[i]),
                )
                results[i] = jitted_udf(ser)

            return results

        return numba_func

    def apply_with_numba(self) -> dict[int, Any]:
        nb_func = self.generate_numba_apply_func(
            cast(Callable, self.func), **self.engine_kwargs
        )

        from pandas.core._numba.extensions import set_numba_data

        # Convert from numba dict to regular dict
        # Our isinstance checks in the df constructor don't pass for numba's typed dict
        with set_numba_data(self.obj.index) as index, set_numba_data(
            self.columns
        ) as columns:
            res = dict(nb_func(self.values, columns, index))

        return res

    @property
    def result_index(self) -> Index:
        return self.index

    @property
    def result_columns(self) -> Index:
        return self.columns

    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        """return the results for the columns"""
        result: DataFrame | Series

        # we have requested to expand
        if self.result_type == "expand":
            result = self.infer_to_same_shape(results, res_index)

        # we have a non-series and don't want inference
        elif not isinstance(results[0], ABCSeries):
            result = self.obj._constructor_sliced(results)
            result.index = res_index

        # we may want to infer results
        else:
            result = self.infer_to_same_shape(results, res_index)

        return result

    def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
        """infer the results to the same shape as the input object"""
        result = self.obj._constructor(data=results)
        result = result.T

        # set the index
        result.index = res_index

        # infer dtypes
        result = result.infer_objects(copy=False)

        return result
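
# result_type="expand" on axis=1 routes through infer_to_same_shape above
# (a sketch): each returned list-like becomes a row of new columns.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   >>> df.apply(lambda row: [row.min(), row.max()], axis=1, result_type="expand")
#      0  1
#   0  1  3
#   1  2  4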


class SeriesApply(NDFrameApply):
    obj: Series
    axis: AxisInt = 0
    by_row: Literal[False, "compat", "_compat"]  # only relevant for apply()

    def __init__(
        self,
        obj: Series,
        func: AggFuncType,
        *,
        convert_dtype: bool | lib.NoDefault = lib.no_default,
        by_row: Literal[False, "compat", "_compat"] = "compat",
        args,
        kwargs,
    ) -> None:
        if convert_dtype is lib.no_default:
            convert_dtype = True
        else:
            warnings.warn(
                "the convert_dtype parameter is deprecated and will be removed in a "
                "future version. Do ``ser.astype(object).apply()`` "
                "instead if you want ``convert_dtype=False``.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        self.convert_dtype = convert_dtype

        super().__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            by_row=by_row,
            args=args,
            kwargs=kwargs,
        )

    def apply(self) -> DataFrame | Series:
        obj = self.obj

        if len(obj) == 0:
            return self.apply_empty_result()

        # dispatch to handle list-like or dict-like
        if is_list_like(self.func):
            return self.apply_list_or_dict_like()

        if isinstance(self.func, str):
            # if we are a string, try to dispatch
            return self.apply_str()

        if self.by_row == "_compat":
            return self.apply_compat()

        # self.func is Callable
        return self.apply_standard()

    def agg(self):
        result = super().agg()
        if result is None:
            obj = self.obj
            func = self.func
            # string, list-like, and dict-like are entirely handled in super
            assert callable(func)

            # GH53325: The setup below is just to keep current behavior while emitting a
            # deprecation message. In the future this will all be replaced with a simple
            # `result = f(self.obj, *self.args, **self.kwargs)`.
            try:
                result = obj.apply(func, args=self.args, **self.kwargs)
            except (ValueError, AttributeError, TypeError):
                result = func(obj, *self.args, **self.kwargs)
            else:
                msg = (
                    f"using {func} in {type(obj).__name__}.agg cannot aggregate and "
                    f"has been deprecated. Use {type(obj).__name__}.transform to "
                    f"keep behavior unchanged."
                )
                warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())

        return result

    def apply_empty_result(self) -> Series:
        obj = self.obj
        return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
            obj, method="apply"
        )

    def apply_compat(self):
        """compat apply method for funcs in listlikes and dictlikes.

        Used for each callable when giving listlikes and dictlikes of callables to
        apply. Needed for compatibility with Pandas < v2.1.

        .. versionadded:: 2.1.0
        """
        obj = self.obj
        func = self.func

        if callable(func):
            f = com.get_cython_func(func)
            if f and not self.args and not self.kwargs:
                return obj.apply(func, by_row=False)

        try:
            result = obj.apply(func, by_row="compat")
        except (ValueError, AttributeError, TypeError):
            result = obj.apply(func, by_row=False)
        return result

    def apply_standard(self) -> DataFrame | Series:
        # caller is responsible for ensuring that f is Callable
        func = cast(Callable, self.func)
        obj = self.obj

        if isinstance(func, np.ufunc):
            with np.errstate(all="ignore"):
                return func(obj, *self.args, **self.kwargs)
        elif not self.by_row:
            return func(obj, *self.args, **self.kwargs)

        if self.args or self.kwargs:
            # _map_values does not support args/kwargs
            def curried(x):
                return func(x, *self.args, **self.kwargs)

        else:
            curried = func

        # row-wise access
        # apply doesn't have a `na_action` keyword and for backward compat reasons
        # we need to give `na_action="ignore"` for categorical data.
        # TODO: remove the `na_action="ignore"` when that default has been changed in
        # Categorical (GH51645).
        action = "ignore" if isinstance(obj.dtype, CategoricalDtype) else None
        mapped = obj._map_values(
            mapper=curried, na_action=action, convert=self.convert_dtype
        )

        if len(mapped) and isinstance(mapped[0], ABCSeries):
            # GH#43986 Need to do list(mapped) in order to get treated as nested
            # See also GH#25959 regarding EA support
            return obj._constructor_expanddim(list(mapped), index=obj.index)
        else:
            return obj._constructor(mapped, index=obj.index).__finalize__(
                obj, method="apply"
            )
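
    # apply_standard in action (a sketch): elementwise mapping by default,
    # expanding to a DataFrame when each element maps to a Series.
    #
    #   >>> import pandas as pd
    #   >>> s = pd.Series([1, 2])
    #   >>> s.apply(lambda x: x * 2)
    #   0    2
    #   1    4
    #   dtype: int64
    #   >>> s.apply(lambda x: pd.Series({"x": x, "sq": x ** 2}))
    #      x  sq
    #   0  1   1
    #   1  2   4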


class GroupByApply(Apply):
    obj: GroupBy | Resampler | BaseWindow

    def __init__(
        self,
        obj: GroupBy[NDFrameT],
        func: AggFuncType,
        *,
        args,
        kwargs,
    ) -> None:
        kwargs = kwargs.copy()
        self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0))
        super().__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            args=args,
            kwargs=kwargs,
        )

    def apply(self):
        raise NotImplementedError

    def transform(self):
        raise NotImplementedError

    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        obj = self.obj
        kwargs = self.kwargs
        if op_name == "apply":
            kwargs = {**kwargs, "by_row": False}

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        if obj._selected_obj.ndim == 1:
            # For SeriesGroupBy this matches _obj_with_exclusions
            selected_obj = obj._selected_obj
        else:
            selected_obj = obj._obj_with_exclusions

        # Only set as_index=True on groupby objects, not on the Window or
        # Resample objects that inherit from this class.
        with com.temp_setattr(
            obj, "as_index", True, condition=hasattr(obj, "as_index")
        ):
            keys, results = self.compute_list_like(op_name, selected_obj, kwargs)
        result = self.wrap_results_list_like(keys, results)
        return result

    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )

        assert op_name in ["agg", "apply"]

        obj = self.obj
        kwargs = {}
        if op_name == "apply":
            by_row = "_compat" if self.by_row else False
            kwargs.update({"by_row": by_row})

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        selected_obj = obj._selected_obj
        selection = obj._selection

        is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))

        # Numba GroupBy engine/engine-kwargs passthrough
        if is_groupby:
            engine = self.kwargs.get("engine", None)
            engine_kwargs = self.kwargs.get("engine_kwargs", None)
            kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs})

        with com.temp_setattr(
            obj, "as_index", True, condition=hasattr(obj, "as_index")
        ):
            result_index, result_data = self.compute_dict_like(
                op_name, selected_obj, selection, kwargs
            )
        result = self.wrap_results_dict_like(selected_obj, result_index, result_data)
        return result
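
    # Dict-like groupby aggregation routed through the method above (a sketch;
    # output formatting abbreviated):
    #
    #   >>> import pandas as pd
    #   >>> df = pd.DataFrame({"g": ["x", "x", "y"], "v": [1, 2, 3]})
    #   >>> df.groupby("g").agg({"v": ["sum", "mean"]})  # doctest: +SKIP
    #        v
    #      sum mean
    #   g
    #   x    3  1.5
    #   y    3  3.0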


class ResamplerWindowApply(GroupByApply):
    axis: AxisInt = 0
    obj: Resampler | BaseWindow

    def __init__(
        self,
        obj: Resampler | BaseWindow,
        func: AggFuncType,
        *,
        args,
        kwargs,
    ) -> None:
        super(GroupByApply, self).__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            args=args,
            kwargs=kwargs,
        )

    def apply(self):
        raise NotImplementedError

    def transform(self):
        raise NotImplementedError


def reconstruct_func(
    func: AggFuncType | None, **kwargs
) -> tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]:
    """
    Internal function to reconstruct func, depending on whether relabeling is
    used, and to normalize the keywords to get the new order of columns.

    If named aggregation is applied, `func` will be None, and kwargs contains the
    column and aggregation function information to be parsed;
    If named aggregation is not applied, `func` is either a string (e.g. 'min'), a
    Callable, a list of them (e.g. ['min', np.max]), or a dictionary of column names
    and str/Callable/lists of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})

    If relabeling is True, will return relabeling, reconstructed func, column
    names, and the reconstructed order of columns.
    If relabeling is False, the columns and order will be None.

    Parameters
    ----------
    func: agg function (e.g. 'min' or Callable) or list of agg functions
        (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
    **kwargs: dict, kwargs used in is_multi_agg_with_relabel and
        normalize_keyword_aggregation function for relabelling

    Returns
    -------
    relabelling: bool, if there is relabelling or not
    func: normalized and mangled func
    columns: tuple of column names
    order: array of columns indices

    Examples
    --------
    >>> reconstruct_func(None, **{"foo": ("col", "min")})
    (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))

    >>> reconstruct_func("min")
    (False, 'min', None, None)
    """
    relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
    columns: tuple[str, ...] | None = None
    order: npt.NDArray[np.intp] | None = None

    if not relabeling:
        if isinstance(func, list) and len(func) > len(set(func)):
            # GH 28426 will raise error if duplicated function names are used and
            # there is no reassigned name
            raise SpecificationError(
                "Function names must be unique if there is no new column names "
                "assigned"
            )
        if func is None:
            # nicer error message
            raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")

    if relabeling:
        # error: Incompatible types in assignment (expression has type
        # "MutableMapping[Hashable, list[Callable[..., Any] | str]]", variable has type
        # "Callable[..., Any] | str | list[Callable[..., Any] | str] |
        # MutableMapping[Hashable, Callable[..., Any] | str | list[Callable[..., Any] |
        # str]] | None")
        func, columns, order = normalize_keyword_aggregation(  # type: ignore[assignment]
            kwargs
        )
    assert func is not None

    return relabeling, func, columns, order


def is_multi_agg_with_relabel(**kwargs) -> bool:
    """
    Check whether kwargs passed to .agg look like multi-agg with relabeling.

    Parameters
    ----------
    **kwargs : dict

    Returns
    -------
    bool

    Examples
    --------
    >>> is_multi_agg_with_relabel(a="max")
    False
    >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
    True
    >>> is_multi_agg_with_relabel()
    False
    """
    return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
        len(kwargs) > 0
    )


def normalize_keyword_aggregation(
    kwargs: dict,
) -> tuple[
    MutableMapping[Hashable, list[AggFuncTypeBase]],
    tuple[str, ...],
    npt.NDArray[np.intp],
]:
    """
    Normalize user-provided "named aggregation" kwargs.
    Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
    to the old Dict[str, List[scalar]].

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    aggspec : dict
        The transformed kwargs.
    columns : tuple[str, ...]
        The user-provided keys.
    col_idx_order : List[int]
        List of columns indices.

    Examples
    --------
    >>> normalize_keyword_aggregation({"output": ("input", "sum")})
    (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
    """
    from pandas.core.indexes.base import Index

    # Normalize the aggregation functions as Mapping[column, List[func]],
    # process normally, then fixup the names.
    # TODO: aggspec type: typing.Dict[str, List[AggScalar]]
    aggspec = defaultdict(list)
    order = []
    columns, pairs = list(zip(*kwargs.items()))

    for column, aggfunc in pairs:
        aggspec[column].append(aggfunc)
        order.append((column, com.get_callable_name(aggfunc) or aggfunc))

    # uniquify aggfunc name if duplicated in order list
    uniquified_order = _make_unique_kwarg_list(order)

    # GH 25719: because aggspec can change the order of assigned columns in
    # aggregation, uniquified_aggspec stores the uniquified order list and is
    # compared with `order` based on index
    aggspec_order = [
        (column, com.get_callable_name(aggfunc) or aggfunc)
        for column, aggfuncs in aggspec.items()
        for aggfunc in aggfuncs
    ]
    uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)

    # get the new index of columns by comparison
    col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
    return aggspec, columns, col_idx_order


def _make_unique_kwarg_list(
    seq: Sequence[tuple[Any, Any]]
) -> Sequence[tuple[Any, Any]]:
    """
    Uniquify aggfunc name of the pairs in the order list

    Examples
    --------
    >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
    >>> _make_unique_kwarg_list(kwarg_list)
    [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
    """
    return [
        (pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair
        for i, pair in enumerate(seq)
    ]


def relabel_result(
    result: DataFrame | Series,
    func: dict[str, list[Callable | str]],
    columns: Iterable[Hashable],
    order: Iterable[int],
) -> dict[Hashable, Series]:
    """
    Internal function to reorder result if relabelling is True for
    dataframe.agg, and return the reordered result in dict.

    Parameters
    ----------
    result: Result from aggregation
    func: Dict of (column name, funcs)
    columns: New columns name for relabelling
    order: New order for relabelling

    Examples
    --------
    >>> from pandas.core.apply import relabel_result
    >>> result = pd.DataFrame(
    ...     {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
    ...     index=["max", "mean", "min"]
    ... )
    >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
    >>> columns = ("foo", "aab", "bar", "dat")
    >>> order = [0, 1, 2, 3]
    >>> result_in_dict = relabel_result(result, funcs, columns, order)
    >>> pd.DataFrame(result_in_dict, index=columns)
           A    C    B
    foo  2.0  NaN  NaN
    aab  NaN  6.0  NaN
    bar  NaN  NaN  4.0
    dat  NaN  NaN  2.5
    """
    from pandas.core.indexes.base import Index

    reordered_indexes = [
        pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
    ]
    reordered_result_in_dict: dict[Hashable, Series] = {}
    idx = 0

    reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
    for col, fun in func.items():
        s = result[col].dropna()

        # In `_aggregate`, the callable names are obtained and used in `result`,
        # and these names are ordered alphabetically. e.g.
        #           C2   C1
        # <lambda>   1  NaN
        # amax     NaN  4.0
        # max      NaN  4.0
        # sum     18.0  6.0
        # Therefore, the order of functions for each column could be shuffled
        # accordingly, so we need to get the callable name if it is not a parsed
        # name, and reorder the aggregated result for each column.
        # e.g. for df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), the correct
        # order is [sum, <lambda>], but in `result` it will be [<lambda>, sum], so
        # we need to reorder so that aggregated values map to their functions.

        # However, if there is only one column being used for aggregation, there
        # is no need to reorder, since the index is not sorted, and we keep it as
        # is in `funcs`, e.g.
        #         A
        # min   1.0
        # mean  1.5
        # mean  1.5
        if reorder_mask:
            fun = [
                com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
            ]
            col_idx_order = Index(s.index).get_indexer(fun)
            s = s.iloc[col_idx_order]

        # assign the new user-provided "named aggregation" as index names, and
        # reindex it based on the whole set of user-provided names.
        s.index = reordered_indexes[idx : idx + len(fun)]
        reordered_result_in_dict[col] = s.reindex(columns, copy=False)
        idx = idx + len(fun)
    return reordered_result_in_dict


def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series:
    from pandas import DataFrame

    relabeling, func, columns, order = reconstruct_func(func, **kwargs)

    if relabeling:
        # This is to keep the order of columns occurrence unchanged, and also
        # keep the order of new columns occurrence unchanged

        # For the return values of reconstruct_func, if relabeling is
        # False, columns and order will be None.
        assert columns is not None
        assert order is not None

        result_in_dict = relabel_result(result, func, columns, order)
        result = DataFrame(result_in_dict, index=columns)

    return result


# TODO: Can't use, because mypy doesn't like us setting __name__
#   error: "partial[Any]" has no attribute "__name__"
# the type is:
#   typing.Sequence[Callable[..., ScalarResult]]
#     -> typing.Sequence[Callable[..., ScalarResult]]:


def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
    """
    Possibly mangle a list of aggfuncs.

    Parameters
    ----------
    aggfuncs : Sequence

    Returns
    -------
    mangled: list-like
        A new AggSpec sequence, where lambdas have been converted
        to have unique names.

    Notes
    -----
    If just one aggfunc is passed, the name will not be mangled.
    """
    if len(aggfuncs) <= 1:
        # don't mangle for .agg([lambda x: .])
        return aggfuncs
    i = 0
    mangled_aggfuncs = []
    for aggfunc in aggfuncs:
        if com.get_callable_name(aggfunc) == "<lambda>":
            aggfunc = partial(aggfunc)
            aggfunc.__name__ = f"<lambda_{i}>"
            i += 1
        mangled_aggfuncs.append(aggfunc)

    return mangled_aggfuncs
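
# A mini-check of the mangling behavior described above (a sketch using the
# private helper; the import path is internal and may change):
#
#   >>> from pandas.core.apply import _managle_lambda_list
#   >>> fns = _managle_lambda_list([lambda x: x.min(), lambda x: x.max()])
#   >>> [f.__name__ for f in fns]
#   ['<lambda_0>', '<lambda_1>']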


def maybe_mangle_lambdas(agg_spec: Any) -> Any:
    """
    Make new lambdas with unique names.

    Parameters
    ----------
    agg_spec : Any
        An argument to GroupBy.agg.
        Non-dict-like `agg_spec` are passed through as is.
        For dict-like `agg_spec` a new spec is returned
        with name-mangled lambdas.

    Returns
    -------
    mangled : Any
        Same type as the input.

    Examples
    --------
    >>> maybe_mangle_lambdas('sum')
    'sum'
    >>> maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
    [<function __main__.<lambda_0>,
     <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
    """
    is_dict = is_dict_like(agg_spec)
    if not (is_dict or is_list_like(agg_spec)):
        return agg_spec
    mangled_aggspec = type(agg_spec)()  # dict or OrderedDict

    if is_dict:
        for key, aggfuncs in agg_spec.items():
            if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
                mangled_aggfuncs = _managle_lambda_list(aggfuncs)
            else:
                mangled_aggfuncs = aggfuncs

            mangled_aggspec[key] = mangled_aggfuncs
    else:
        mangled_aggspec = _managle_lambda_list(agg_spec)

    return mangled_aggspec


def validate_func_kwargs(
    kwargs: dict,
) -> tuple[list[str], list[str | Callable[..., Any]]]:
    """
    Validates types of user-provided "named aggregation" kwargs.
    `TypeError` is raised if aggfunc is not `str` or callable.

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    columns : List[str]
        List of user-provided keys.
    func : List[Union[str, callable[..., Any]]]
        List of user-provided aggfuncs

    Examples
    --------
    >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
    (['one', 'two'], ['min', 'max'])
    """
    tuple_given_message = "func is expected but received {} in **kwargs."
    columns = list(kwargs)
    func = []
    for col_func in kwargs.values():
        if not (isinstance(col_func, str) or callable(col_func)):
            raise TypeError(tuple_given_message.format(type(col_func).__name__))
        func.append(col_func)
    if not columns:
        no_arg_message = "Must provide 'func' or named aggregation **kwargs."
        raise TypeError(no_arg_message)
    return columns, func


def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool:
    return isinstance(colg, ABCDataFrame) or (
        isinstance(colg, ABCSeries) and op_name == "agg"
    )


def warn_alias_replacement(
    obj: AggObjType,
    func: Callable,
    alias: str,
) -> None:
    if alias.startswith("np."):
        full_alias = alias
    else:
        full_alias = f"{type(obj).__name__}.{alias}"
        alias = f'"{alias}"'
    warnings.warn(
        f"The provided callable {func} is currently using "
        f"{full_alias}. In a future version of pandas, "
        f"the provided callable will be used directly. To keep current "
        f"behavior pass the string {alias} instead.",
        category=FutureWarning,
        stacklevel=find_stack_level(),
    )
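
# The deprecation warning constructed above fires when a recognized numpy
# callable is passed to .agg (a sketch; warning text abbreviated):
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> pd.Series([1, 2, 3]).agg(np.sum)  # doctest: +SKIP
#   FutureWarning: The provided callable <function sum ...> is currently
#   using Series.sum. ... To keep current behavior pass the string "sum" instead.
#   6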
llmeval-env/lib/python3.10/site-packages/pandas/core/arraylike.py
ADDED
@@ -0,0 +1,530 @@
"""
Methods that can be shared by many array-like classes or subclasses:
    Series
    Index
    ExtensionArray
"""
from __future__ import annotations

import operator
from typing import Any

import numpy as np

from pandas._libs import lib
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op

from pandas.core.dtypes.generic import ABCNDFrame

from pandas.core import roperator
from pandas.core.construction import extract_array
from pandas.core.ops.common import unpack_zerodim_and_defer

REDUCTION_ALIASES = {
    "maximum": "max",
    "minimum": "min",
    "add": "sum",
    "multiply": "prod",
}


class OpsMixin:
    # -------------------------------------------------------------
    # Comparisons

    def _cmp_method(self, other, op):
        return NotImplemented

    @unpack_zerodim_and_defer("__eq__")
    def __eq__(self, other):
        return self._cmp_method(other, operator.eq)

    @unpack_zerodim_and_defer("__ne__")
    def __ne__(self, other):
        return self._cmp_method(other, operator.ne)

    @unpack_zerodim_and_defer("__lt__")
    def __lt__(self, other):
        return self._cmp_method(other, operator.lt)

    @unpack_zerodim_and_defer("__le__")
    def __le__(self, other):
        return self._cmp_method(other, operator.le)

    @unpack_zerodim_and_defer("__gt__")
    def __gt__(self, other):
        return self._cmp_method(other, operator.gt)

    @unpack_zerodim_and_defer("__ge__")
    def __ge__(self, other):
        return self._cmp_method(other, operator.ge)
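
    # A minimal sketch of how a subclass plugs into these hooks: OpsMixin
    # wires the comparison dunders, and the subclass supplies _cmp_method.
    # (Illustrative only; MyArray is a hypothetical class, not part of pandas.)
    #
    #   >>> import numpy as np
    #   >>> from pandas.core.arraylike import OpsMixin
    #   >>> class MyArray(OpsMixin):
    #   ...     def __init__(self, values):
    #   ...         self._values = np.asarray(values)
    #   ...     def _cmp_method(self, other, op):
    #   ...         return op(self._values, np.asarray(other))
    #   >>> MyArray([1, 2, 3]) < [2, 2, 2]
    #   array([ True, False, False])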

    # -------------------------------------------------------------
    # Logical Methods

    def _logical_method(self, other, op):
        return NotImplemented

    @unpack_zerodim_and_defer("__and__")
    def __and__(self, other):
        return self._logical_method(other, operator.and_)

    @unpack_zerodim_and_defer("__rand__")
    def __rand__(self, other):
        return self._logical_method(other, roperator.rand_)

    @unpack_zerodim_and_defer("__or__")
    def __or__(self, other):
        return self._logical_method(other, operator.or_)

    @unpack_zerodim_and_defer("__ror__")
    def __ror__(self, other):
        return self._logical_method(other, roperator.ror_)

    @unpack_zerodim_and_defer("__xor__")
    def __xor__(self, other):
        return self._logical_method(other, operator.xor)

    @unpack_zerodim_and_defer("__rxor__")
    def __rxor__(self, other):
        return self._logical_method(other, roperator.rxor)

    # -------------------------------------------------------------
    # Arithmetic Methods

    def _arith_method(self, other, op):
        return NotImplemented

    @unpack_zerodim_and_defer("__add__")
    def __add__(self, other):
        """
        Get Addition of DataFrame and other, column-wise.

        Equivalent to ``DataFrame.add(other)``.

        Parameters
        ----------
        other : scalar, sequence, Series, dict or DataFrame
            Object to be added to the DataFrame.

        Returns
        -------
        DataFrame
            The result of adding ``other`` to DataFrame.

        See Also
        --------
        DataFrame.add : Add a DataFrame and another object, with option for index-
            or column-oriented addition.

        Examples
        --------
        >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
        ...                   index=['elk', 'moose'])
        >>> df
               height  weight
        elk       1.5     500
        moose     2.6     800

        Adding a scalar affects all rows and columns.

        >>> df[['height', 'weight']] + 1.5
               height  weight
        elk       3.0   501.5
        moose     4.1   801.5

        Each element of a list is added to a column of the DataFrame, in order.

        >>> df[['height', 'weight']] + [0.5, 1.5]
               height  weight
        elk       2.0   501.5
        moose     3.1   801.5

        Keys of a dictionary are aligned to the DataFrame, based on column names;
        each value in the dictionary is added to the corresponding column.

        >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5}
               height  weight
        elk       2.0   501.5
        moose     3.1   801.5

        When `other` is a :class:`Series`, the index of `other` is aligned with the
        columns of the DataFrame.

        >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height'])
        >>> df[['height', 'weight']] + s1
               height  weight
        elk       3.0   500.5
        moose     4.1   800.5

        Even when the index of `other` is the same as the index of the DataFrame,
        the :class:`Series` will not be reoriented. If index-wise alignment is desired,
        :meth:`DataFrame.add` should be used with `axis='index'`.

        >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose'])
        >>> df[['height', 'weight']] + s2
               elk  height  moose  weight
        elk    NaN     NaN    NaN     NaN
        moose  NaN     NaN    NaN     NaN

        >>> df[['height', 'weight']].add(s2, axis='index')
               height  weight
        elk       2.0   500.5
        moose     4.1   801.5

        When `other` is a :class:`DataFrame`, both columns names and the
        index are aligned.

        >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]},
        ...                      index=['elk', 'moose', 'deer'])
        >>> df[['height', 'weight']] + other
               height  weight
        deer      NaN     NaN
        elk       1.7     NaN
        moose     3.0     NaN
        """
        return self._arith_method(other, operator.add)

    @unpack_zerodim_and_defer("__radd__")
    def __radd__(self, other):
        return self._arith_method(other, roperator.radd)

    @unpack_zerodim_and_defer("__sub__")
    def __sub__(self, other):
        return self._arith_method(other, operator.sub)

    @unpack_zerodim_and_defer("__rsub__")
    def __rsub__(self, other):
        return self._arith_method(other, roperator.rsub)

    @unpack_zerodim_and_defer("__mul__")
    def __mul__(self, other):
        return self._arith_method(other, operator.mul)

    @unpack_zerodim_and_defer("__rmul__")
|
205 |
+
def __rmul__(self, other):
|
206 |
+
return self._arith_method(other, roperator.rmul)
|
207 |
+
|
208 |
+
@unpack_zerodim_and_defer("__truediv__")
|
209 |
+
def __truediv__(self, other):
|
210 |
+
return self._arith_method(other, operator.truediv)
|
211 |
+
|
212 |
+
@unpack_zerodim_and_defer("__rtruediv__")
|
213 |
+
def __rtruediv__(self, other):
|
214 |
+
return self._arith_method(other, roperator.rtruediv)
|
215 |
+
|
216 |
+
@unpack_zerodim_and_defer("__floordiv__")
|
217 |
+
def __floordiv__(self, other):
|
218 |
+
return self._arith_method(other, operator.floordiv)
|
219 |
+
|
    @unpack_zerodim_and_defer("__rfloordiv__")
    def __rfloordiv__(self, other):
        return self._arith_method(other, roperator.rfloordiv)

    @unpack_zerodim_and_defer("__mod__")
    def __mod__(self, other):
        return self._arith_method(other, operator.mod)

    @unpack_zerodim_and_defer("__rmod__")
    def __rmod__(self, other):
        return self._arith_method(other, roperator.rmod)

    @unpack_zerodim_and_defer("__divmod__")
    def __divmod__(self, other):
        return self._arith_method(other, divmod)

    @unpack_zerodim_and_defer("__rdivmod__")
    def __rdivmod__(self, other):
        return self._arith_method(other, roperator.rdivmod)

    @unpack_zerodim_and_defer("__pow__")
    def __pow__(self, other):
        return self._arith_method(other, operator.pow)

    @unpack_zerodim_and_defer("__rpow__")
    def __rpow__(self, other):
        return self._arith_method(other, roperator.rpow)

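
# Illustrative sketch (not part of the original module; ``Wrapped`` is a
# hypothetical class): implementing ``_arith_method`` is all a subclass of
# OpsMixin needs in order to pick up the full set of dunder operators above.
#
#     class Wrapped(OpsMixin):
#         def __init__(self, value) -> None:
#             self.value = value
#
#         def _arith_method(self, other, op):
#             other = getattr(other, "value", other)
#             return Wrapped(op(self.value, other))
#
#     assert (Wrapped(2) + 3).value == 5   # __add__ -> operator.add
#     assert (3 - Wrapped(2)).value == 1   # __rsub__ -> roperator.rsub
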

# -----------------------------------------------------------------------------
# Helpers to implement __array_ufunc__


def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
    """
    Compatibility with numpy ufuncs.

    See also
    --------
    numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
    """
    from pandas.core.frame import (
        DataFrame,
        Series,
    )
    from pandas.core.generic import NDFrame
    from pandas.core.internals import (
        ArrayManager,
        BlockManager,
    )

    cls = type(self)

    kwargs = _standardize_out_kwarg(**kwargs)

    # for binary ops, use our custom dunder methods
    result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs)
    if result is not NotImplemented:
        return result

    # Determine if we should defer.
    no_defer = (
        np.ndarray.__array_ufunc__,
        cls.__array_ufunc__,
    )

    for item in inputs:
        higher_priority = (
            hasattr(item, "__array_priority__")
            and item.__array_priority__ > self.__array_priority__
        )
        has_array_ufunc = (
            hasattr(item, "__array_ufunc__")
            and type(item).__array_ufunc__ not in no_defer
            and not isinstance(item, self._HANDLED_TYPES)
        )
        if higher_priority or has_array_ufunc:
            return NotImplemented

    # align all the inputs.
    types = tuple(type(x) for x in inputs)
    alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)]

    if len(alignable) > 1:
        # This triggers alignment.
        # At the moment, there aren't any ufuncs with more than two inputs
        # so this ends up just being x1.index | x2.index, but we write
        # it to handle *args.
        set_types = set(types)
        if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types):
            # We currently don't handle ufunc(DataFrame, Series)
            # well. Previously this raised an internal ValueError. We might
            # support it someday, so raise a NotImplementedError.
            raise NotImplementedError(
                f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs."
            )
        axes = self.axes
        for obj in alignable[1:]:
            # this relies on the fact that we aren't handling mixed
            # series / frame ufuncs.
            for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)):
                axes[i] = ax1.union(ax2)

        reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes))
        inputs = tuple(
            x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x
            for x, t in zip(inputs, types)
        )
    else:
        reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes))

    if self.ndim == 1:
        names = [getattr(x, "name") for x in inputs if hasattr(x, "name")]
        name = names[0] if len(set(names)) == 1 else None
        reconstruct_kwargs = {"name": name}
    else:
        reconstruct_kwargs = {}

    def reconstruct(result):
        if ufunc.nout > 1:
            # np.modf, np.frexp, np.divmod
            return tuple(_reconstruct(x) for x in result)

        return _reconstruct(result)

    def _reconstruct(result):
        if lib.is_scalar(result):
            return result

        if result.ndim != self.ndim:
            if method == "outer":
                raise NotImplementedError
            return result
        if isinstance(result, (BlockManager, ArrayManager)):
            # we went through BlockManager.apply e.g. np.sqrt
            result = self._constructor_from_mgr(result, axes=result.axes)
        else:
            # we converted an array, lost our axes
            result = self._constructor(
                result, **reconstruct_axes, **reconstruct_kwargs, copy=False
            )
        # TODO: When we support multiple values in __finalize__, this
        # should pass alignable to `__finalize__` instead of self.
        # Then `np.add(a, b)` would consider attrs from both a and b
        # when a and b are NDFrames.
        if len(alignable) == 1:
            result = result.__finalize__(self)
        return result

    if "out" in kwargs:
        # e.g. test_multiindex_get_loc
        result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs)
        return reconstruct(result)

    if method == "reduce":
        # e.g. test.series.test_ufunc.test_reduce
        result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs)
        if result is not NotImplemented:
            return result

    # We still get here with kwargs `axis` for e.g. np.maximum.accumulate
    # and `dtype` and `keepdims` for np.ptp

    if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1):
        # Just give up on preserving types in the complex case.
        # In theory we could preserve them for them.
        # * nout>1 is doable if BlockManager.apply took nout and
        #   returned a Tuple[BlockManager].
        # * len(inputs) > 1 is doable when we know that we have
        #   aligned blocks / dtypes.

        # e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add
        inputs = tuple(np.asarray(x) for x in inputs)
        # Note: we can't use default_array_ufunc here bc reindexing means
        #  that `self` may not be among `inputs`
        result = getattr(ufunc, method)(*inputs, **kwargs)
    elif self.ndim == 1:
        # ufunc(series, ...)
        inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
        result = getattr(ufunc, method)(*inputs, **kwargs)
    else:
        # ufunc(dataframe)
        if method == "__call__" and not kwargs:
            # for np.<ufunc>(..) calls
            # kwargs cannot necessarily be handled block-by-block, so only
            # take this path if there are no kwargs
            mgr = inputs[0]._mgr
            result = mgr.apply(getattr(ufunc, method))
        else:
            # otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
            # Those can have an axis keyword and thus can't be called block-by-block
            result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs)
            # e.g. np.negative (only one reached), with "where" and "out" in kwargs

    result = reconstruct(result)
    return result

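
# Minimal usage sketch (not part of the original module; assumes only numpy
# and pandas): because binary ufuncs are routed through the dunder ops above,
# Series inputs are aligned on their indexes before the ufunc is applied.
#
#     >>> import numpy as np, pandas as pd
#     >>> s1 = pd.Series([1, 2], index=["a", "b"])
#     >>> s2 = pd.Series([10, 20], index=["b", "c"])
#     >>> np.add(s1, s2)
#     a     NaN
#     b    12.0
#     c     NaN
#     dtype: float64
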

def _standardize_out_kwarg(**kwargs) -> dict:
    """
    If kwargs contain "out1" and "out2", replace that with a tuple "out"

    np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or
    `out1=out1, out2=out2`
    """
    if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs:
        out1 = kwargs.pop("out1")
        out2 = kwargs.pop("out2")
        out = (out1, out2)
        kwargs["out"] = out
    return kwargs

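
# For illustration (sketch, not part of the original module): the helper
# above makes the two spellings numpy accepts for multi-output ufuncs
# equivalent for the dispatch code below.
#
#     >>> _standardize_out_kwarg(out1="a", out2="b")
#     {'out': ('a', 'b')}
#     >>> _standardize_out_kwarg(out=("a", "b"))
#     {'out': ('a', 'b')}
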

def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    If we have an `out` keyword, then call the ufunc without `out` and then
    set the result into the given `out`.
    """

    # Note: we assume _standardize_out_kwarg has already been called.
    out = kwargs.pop("out")
    where = kwargs.pop("where", None)

    result = getattr(ufunc, method)(*inputs, **kwargs)

    if result is NotImplemented:
        return NotImplemented

    if isinstance(result, tuple):
        # i.e. np.divmod, np.modf, np.frexp
        if not isinstance(out, tuple) or len(out) != len(result):
            raise NotImplementedError

        for arr, res in zip(out, result):
            _assign_where(arr, res, where)

        return out

    if isinstance(out, tuple):
        if len(out) == 1:
            out = out[0]
        else:
            raise NotImplementedError

    _assign_where(out, result, where)
    return out


def _assign_where(out, result, where) -> None:
    """
    Set a ufunc result into 'out', masking with a 'where' argument if necessary.
    """
    if where is None:
        # no 'where' arg passed to ufunc
        out[:] = result
    else:
        np.putmask(out, where, result)

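
# Small sketch (not part of the original module; plain numpy) of what
# ``_assign_where`` does: without ``where`` it overwrites ``out`` wholesale,
# with a mask it only touches the selected positions, mirroring the ufunc
# ``where=`` semantics.
#
#     >>> out = np.zeros(3)
#     >>> _assign_where(out, np.array([1.0, 2.0, 3.0]), np.array([True, False, True]))
#     >>> out
#     array([1., 0., 3.])
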

def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    Fallback to the behavior we would get if we did not define __array_ufunc__.

    Notes
    -----
    We are assuming that `self` is among `inputs`.
    """
    if not any(x is self for x in inputs):
        raise NotImplementedError

    new_inputs = [x if x is not self else np.asarray(x) for x in inputs]

    return getattr(ufunc, method)(*new_inputs, **kwargs)


def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """
    Dispatch ufunc reductions to self's reduction methods.
    """
    assert method == "reduce"

    if len(inputs) != 1 or inputs[0] is not self:
        return NotImplemented

    if ufunc.__name__ not in REDUCTION_ALIASES:
        return NotImplemented

    method_name = REDUCTION_ALIASES[ufunc.__name__]

    # NB: we are assuming that min/max represent minimum/maximum methods,
    #  which would not be accurate for e.g. Timestamp.min
    if not hasattr(self, method_name):
        return NotImplemented

    if self.ndim > 1:
        if isinstance(self, ABCNDFrame):
            # TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA
            kwargs["numeric_only"] = False

        if "axis" not in kwargs:
            # For DataFrame reductions we don't want the default axis=0
            # Note: np.min is not a ufunc, but uses array_function_dispatch,
            #  so calls DataFrame.min (without ever getting here) with the np.min
            #  default of axis=None, which DataFrame.min catches and changes to axis=0.
            # np.minimum.reduce(df) gets here bc axis is not in kwargs,
            #  so we set axis=0 to match the behavior of np.minimum.reduce(df.values)
            kwargs["axis"] = 0

    # By default, numpy's reductions do not skip NaNs, so we have to
    #  pass skipna=False
    return getattr(self, method_name)(skipna=False, **kwargs)
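
# Minimal usage sketch (not part of the original module; assumes only numpy
# and pandas): a ufunc reduction listed in REDUCTION_ALIASES is rerouted to
# the matching pandas reduction with ``skipna=False``, so NaN handling
# matches numpy's.
#
#     >>> import numpy as np, pandas as pd
#     >>> np.add.reduce(pd.Series([1.0, 2.0]))
#     3.0
#     >>> np.add.reduce(pd.Series([1.0, np.nan]))  # NaNs are not skipped
#     nan
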
llmeval-env/lib/python3.10/site-packages/pandas/core/base.py
ADDED
@@ -0,0 +1,1391 @@
"""
Base and utility classes for pandas objects.
"""

from __future__ import annotations

import textwrap
from typing import (
    TYPE_CHECKING,
    Any,
    Generic,
    Literal,
    cast,
    final,
    overload,
)
import warnings

import numpy as np

from pandas._config import using_copy_on_write

from pandas._libs import lib
from pandas._typing import (
    AxisInt,
    DtypeObj,
    IndexLabel,
    NDFrameT,
    Self,
    Shape,
    npt,
)
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
    cache_readonly,
    doc,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import can_hold_element
from pandas.core.dtypes.common import (
    is_object_dtype,
    is_scalar,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.missing import (
    isna,
    remove_na_arraylike,
)

from pandas.core import (
    algorithms,
    nanops,
    ops,
)
from pandas.core.accessor import DirNamesMixin
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import (
    ensure_wrapped_if_datetimelike,
    extract_array,
)

if TYPE_CHECKING:
    from collections.abc import (
        Hashable,
        Iterator,
    )

    from pandas._typing import (
        DropKeep,
        NumpySorter,
        NumpyValueArrayLike,
        ScalarLike_co,
    )

    from pandas import (
        DataFrame,
        Index,
        Series,
    )


_shared_docs: dict[str, str] = {}
_indexops_doc_kwargs = {
    "klass": "IndexOpsMixin",
    "inplace": "",
    "unique": "IndexOpsMixin",
    "duplicated": "IndexOpsMixin",
}


class PandasObject(DirNamesMixin):
    """
    Baseclass for various pandas objects.
    """

    # results from calls to methods decorated with cache_readonly get added to _cache
    _cache: dict[str, Any]

    @property
    def _constructor(self):
        """
        Class constructor (for this class it's just `__class__`).
        """
        return type(self)

    def __repr__(self) -> str:
        """
        Return a string representation for a particular object.
        """
        # Should be overwritten by base classes
        return object.__repr__(self)

    def _reset_cache(self, key: str | None = None) -> None:
        """
        Reset cached properties. If ``key`` is passed, only clears that key.
        """
        if not hasattr(self, "_cache"):
            return
        if key is None:
            self._cache.clear()
        else:
            self._cache.pop(key, None)

    def __sizeof__(self) -> int:
        """
        Generates the total memory usage for an object that returns
        either a value or Series of values
        """
        memory_usage = getattr(self, "memory_usage", None)
        if memory_usage:
            mem = memory_usage(deep=True)  # pylint: disable=not-callable
            return int(mem if is_scalar(mem) else mem.sum())

        # no memory_usage attribute, so fall back to object's 'sizeof'
        return super().__sizeof__()


class NoNewAttributesMixin:
    """
    Mixin which prevents adding new attributes.

    Prevents additional attributes via xxx.attribute = "something" after a
    call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).

    If you really want to add a new attribute at a later time, you need to use
    `object.__setattr__(self, key, value)`.
    """

    def _freeze(self) -> None:
        """
        Prevents setting additional attributes.
        """
        object.__setattr__(self, "__frozen", True)

    # prevent adding any attribute via s.xxx.new_attribute = ...
    def __setattr__(self, key: str, value) -> None:
        # _cache is used by a decorator
        # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
        # because
        # 1.) getattr is false for attributes that raise errors
        # 2.) cls.__dict__ doesn't traverse into base classes
        if getattr(self, "__frozen", False) and not (
            key == "_cache"
            or key in type(self).__dict__
            or getattr(self, key, None) is not None
        ):
            raise AttributeError(f"You cannot add any new attribute '{key}'")
        object.__setattr__(self, key, value)

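
# Illustration (sketch, not part of the original module): accessor objects
# such as ``Series.str`` inherit this mixin and are frozen after
# construction, so attribute typos fail loudly instead of being silently
# created.
#
#     >>> import pandas as pd
#     >>> pd.Series(["a"]).str.new_option = True
#     Traceback (most recent call last):
#         ...
#     AttributeError: You cannot add any new attribute 'new_option'
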

class SelectionMixin(Generic[NDFrameT]):
    """
    Mixin implementing the selection & aggregation interface on a group-like
    object; sub-classes need to define: obj, exclusions
    """

    obj: NDFrameT
    _selection: IndexLabel | None = None
    exclusions: frozenset[Hashable]
    _internal_names = ["_cache", "__setstate__"]
    _internal_names_set = set(_internal_names)

    @final
    @property
    def _selection_list(self):
        if not isinstance(
            self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
        ):
            return [self._selection]
        return self._selection

    @cache_readonly
    def _selected_obj(self):
        if self._selection is None or isinstance(self.obj, ABCSeries):
            return self.obj
        else:
            return self.obj[self._selection]

    @final
    @cache_readonly
    def ndim(self) -> int:
        return self._selected_obj.ndim

    @final
    @cache_readonly
    def _obj_with_exclusions(self):
        if isinstance(self.obj, ABCSeries):
            return self.obj

        if self._selection is not None:
            return self.obj._getitem_nocopy(self._selection_list)

        if len(self.exclusions) > 0:
            # equivalent to `self.obj.drop(self.exclusions, axis=1)`
            # but this avoids consolidating and making a copy
            # TODO: following GH#45287 can we now use .drop directly without
            #  making a copy?
            return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True)
        else:
            return self.obj

    def __getitem__(self, key):
        if self._selection is not None:
            raise IndexError(f"Column(s) {self._selection} already selected")

        if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
            if len(self.obj.columns.intersection(key)) != len(set(key)):
                bad_keys = list(set(key).difference(self.obj.columns))
                raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
            return self._gotitem(list(key), ndim=2)

        else:
            if key not in self.obj:
                raise KeyError(f"Column not found: {key}")
            ndim = self.obj[key].ndim
            return self._gotitem(key, ndim=ndim)

    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : str / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        raise AbstractMethodError(self)

    @final
    def _infer_selection(self, key, subset: Series | DataFrame):
        """
        Infer the `selection` to pass to our constructor in _gotitem.
        """
        # Shared by Rolling and Resample
        selection = None
        if subset.ndim == 2 and (
            (lib.is_scalar(key) and key in subset) or lib.is_list_like(key)
        ):
            selection = key
        elif subset.ndim == 1 and lib.is_scalar(key) and key == subset.name:
            selection = key
        return selection

    def aggregate(self, func, *args, **kwargs):
        raise AbstractMethodError(self)

    agg = aggregate

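
# Illustration (sketch, not part of the original module): groupby objects
# use this mixin, so selecting a column twice raises instead of nesting.
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({"a": [1, 1], "b": [2, 3]})
#     >>> gb = df.groupby("a")["b"]
#     >>> gb["b"]
#     Traceback (most recent call last):
#         ...
#     IndexError: Column(s) b already selected
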

class IndexOpsMixin(OpsMixin):
    """
    Common ops mixin to support a unified interface / docs for Series / Index
    """

    # ndarray compatibility
    __array_priority__ = 1000
    _hidden_attrs: frozenset[str] = frozenset(
        ["tolist"]  # tolist is not deprecated, just suppressed in the __dir__
    )

    @property
    def dtype(self) -> DtypeObj:
        # must be defined here as a property for mypy
        raise AbstractMethodError(self)

    @property
    def _values(self) -> ExtensionArray | np.ndarray:
        # must be defined here as a property for mypy
        raise AbstractMethodError(self)

    @final
    def transpose(self, *args, **kwargs) -> Self:
        """
        Return the transpose, which is by definition self.

        Returns
        -------
        %(klass)s
        """
        nv.validate_transpose(args, kwargs)
        return self

    T = property(
        transpose,
        doc="""
        Return the transpose, which is by definition self.

        Examples
        --------
        For Series:

        >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
        >>> s
        0     Ant
        1    Bear
        2     Cow
        dtype: object
        >>> s.T
        0     Ant
        1    Bear
        2     Cow
        dtype: object

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx.T
        Index([1, 2, 3], dtype='int64')
        """,
    )

    @property
    def shape(self) -> Shape:
        """
        Return a tuple of the shape of the underlying data.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.shape
        (3,)
        """
        return self._values.shape

    def __len__(self) -> int:
        # We need this defined here for mypy
        raise AbstractMethodError(self)

    @property
    def ndim(self) -> Literal[1]:
        """
        Number of dimensions of the underlying data, by definition 1.

        Examples
        --------
        >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
        >>> s
        0     Ant
        1    Bear
        2     Cow
        dtype: object
        >>> s.ndim
        1

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')
        >>> idx.ndim
        1
        """
        return 1

    @final
    def item(self):
        """
        Return the first element of the underlying data as a Python scalar.

        Returns
        -------
        scalar
            The first element of Series or Index.

        Raises
        ------
        ValueError
            If the data is not length = 1.

        Examples
        --------
        >>> s = pd.Series([1])
        >>> s.item()
        1

        For an index:

        >>> s = pd.Series([1], index=['a'])
        >>> s.index.item()
        'a'
        """
        if len(self) == 1:
            return next(iter(self))
        raise ValueError("can only convert an array of size 1 to a Python scalar")

    @property
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.

        Examples
        --------
        For Series:

        >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
        >>> s
        0     Ant
        1    Bear
        2     Cow
        dtype: object
        >>> s.nbytes
        24

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')
        >>> idx.nbytes
        24
        """
        return self._values.nbytes

    @property
    def size(self) -> int:
        """
        Return the number of elements in the underlying data.

        Examples
        --------
        For Series:

        >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
        >>> s
        0     Ant
        1    Bear
        2     Cow
        dtype: object
        >>> s.size
        3

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')
        >>> idx.size
        3
        """
        return len(self._values)

    @property
    def array(self) -> ExtensionArray:
        """
        The ExtensionArray of the data backing this Series or Index.

        Returns
        -------
        ExtensionArray
            An ExtensionArray of the values stored within. For extension
            types, this is the actual array. For NumPy native types, this
            is a thin (no copy) wrapper around :class:`numpy.ndarray`.

            ``.array`` differs from ``.values``, which may require converting
            the data to a different form.

        See Also
        --------
        Index.to_numpy : Similar method that always returns a NumPy array.
        Series.to_numpy : Similar method that always returns a NumPy array.

        Notes
        -----
        This table lays out the different array types for each extension
        dtype within pandas.

        ================== =============================
        dtype              array type
        ================== =============================
        category           Categorical
        period             PeriodArray
        interval           IntervalArray
        IntegerNA          IntegerArray
        string             StringArray
        boolean            BooleanArray
        datetime64[ns, tz] DatetimeArray
        ================== =============================

        For any 3rd-party extension types, the array type will be an
        ExtensionArray.

        For all remaining dtypes ``.array`` will be a
        :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
        stored within. If you absolutely need a NumPy array (possibly with
        copying / coercing data), then use :meth:`Series.to_numpy` instead.

        Examples
        --------
        For regular NumPy types like int, and float, a NumpyExtensionArray
        is returned.

        >>> pd.Series([1, 2, 3]).array
        <NumpyExtensionArray>
        [1, 2, 3]
        Length: 3, dtype: int64

        For extension types, like Categorical, the actual ExtensionArray
        is returned

        >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
        >>> ser.array
        ['a', 'b', 'a']
        Categories (2, object): ['a', 'b']
        """
        raise AbstractMethodError(self)

    @final
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
        **kwargs,
    ) -> np.ndarray:
        """
        A NumPy ndarray representing the values in this Series or Index.

        Parameters
        ----------
        dtype : str or numpy.dtype, optional
            The dtype to pass to :meth:`numpy.asarray`.
        copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary.
        na_value : Any, optional
            The value to use for missing values. The default value depends
            on `dtype` and the type of the array.
        **kwargs
            Additional keywords passed through to the ``to_numpy`` method
            of the underlying array (for extension arrays).

        Returns
        -------
        numpy.ndarray

        See Also
        --------
        Series.array : Get the actual data stored within.
        Index.array : Get the actual data stored within.
        DataFrame.to_numpy : Similar method for DataFrame.

        Notes
        -----
        The returned array will be the same up to equality (values equal
        in `self` will be equal in the returned array; likewise for values
        that are not equal). When `self` contains an ExtensionArray, the
        dtype may be different. For example, for a category-dtype Series,
        ``to_numpy()`` will return a NumPy array and the categorical dtype
        will be lost.

        For NumPy dtypes, this will be a reference to the actual data stored
        in this Series or Index (assuming ``copy=False``). Modifying the result
        in place will modify the data stored in the Series or Index (not that
        we recommend doing that).

        For extension types, ``to_numpy()`` *may* require copying data and
        coercing the result to a NumPy type (possibly object), which may be
        expensive. When you need a no-copy reference to the underlying data,
        :attr:`Series.array` should be used instead.

        This table lays out the different dtypes and default return types of
        ``to_numpy()`` for various dtypes within pandas.

        ================== ================================
        dtype              array type
        ================== ================================
        category[T]        ndarray[T] (same dtype as input)
        period             ndarray[object] (Periods)
        interval           ndarray[object] (Intervals)
        IntegerNA          ndarray[object]
        datetime64[ns]     datetime64[ns]
        datetime64[ns, tz] ndarray[object] (Timestamps)
        ================== ================================

        Examples
        --------
        >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
        >>> ser.to_numpy()
        array(['a', 'b', 'a'], dtype=object)

        Specify the `dtype` to control how datetime-aware data is represented.
        Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
        objects, each with the correct ``tz``.

        >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
        >>> ser.to_numpy(dtype=object)
        array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
               Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
              dtype=object)

        Or ``dtype='datetime64[ns]'`` to return an ndarray of native
        datetime64 values. The values are converted to UTC and the timezone
        info is dropped.

        >>> ser.to_numpy(dtype="datetime64[ns]")
        ... # doctest: +ELLIPSIS
        array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
              dtype='datetime64[ns]')
        """
        if isinstance(self.dtype, ExtensionDtype):
            return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs)
        elif kwargs:
            bad_keys = next(iter(kwargs.keys()))
            raise TypeError(
                f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
            )

        fillna = (
            na_value is not lib.no_default
            # no need to fillna with np.nan if we already have a float dtype
            and not (na_value is np.nan and np.issubdtype(self.dtype, np.floating))
        )

        values = self._values
        if fillna:
            if not can_hold_element(values, na_value):
                # if we can't hold the na_value asarray either makes a copy or we
                # error before modifying values. The asarray later on thus won't make
                # another copy
                values = np.asarray(values, dtype=dtype)
            else:
                values = values.copy()

            values[np.asanyarray(isna(self))] = na_value

        result = np.asarray(values, dtype=dtype)

        if (copy and not fillna) or (not copy and using_copy_on_write()):
            if np.shares_memory(self._values[:2], result[:2]):
                # Take slices to improve performance of check
                if using_copy_on_write() and not copy:
                    result = result.view()
                    result.flags.writeable = False
                else:
                    result = result.copy()

        return result

    @final
    @property
    def empty(self) -> bool:
        return not self.size

    @doc(op="max", oppose="min", value="largest")
    def argmax(
        self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
    ) -> int:
        """
        Return int position of the {value} value in the Series.

        If the {op}imum is achieved in multiple locations,
        the first row position is returned.

        Parameters
        ----------
        axis : {{None}}
            Unused. Parameter needed for compatibility with DataFrame.
        skipna : bool, default True
            Exclude NA/null values when showing the result.
        *args, **kwargs
            Additional arguments and keywords for compatibility with NumPy.

        Returns
        -------
        int
            Row position of the {op}imum value.

        See Also
        --------
        Series.arg{op} : Return position of the {op}imum value.
        Series.arg{oppose} : Return position of the {oppose}imum value.
        numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
        Series.idxmax : Return index label of the maximum values.
        Series.idxmin : Return index label of the minimum values.

        Examples
        --------
        Consider dataset containing cereal calories

        >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
        ...                'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
        >>> s
        Corn Flakes              100.0
        Almond Delight           110.0
        Cinnamon Toast Crunch    120.0
        Cocoa Puff               110.0
        dtype: float64

        >>> s.argmax()
        2
        >>> s.argmin()
        0

        The maximum cereal calories is the third element and
        the minimum cereal calories is the first element,
        since series is zero-indexed.
        """
        delegate = self._values
        nv.validate_minmax_axis(axis)
        skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)

        if isinstance(delegate, ExtensionArray):
            if not skipna and delegate.isna().any():
                warnings.warn(
                    f"The behavior of {type(self).__name__}.argmax/argmin "
                    "with skipna=False and NAs, or with all-NAs is deprecated. "
                    "In a future version this will raise ValueError.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                return -1
            else:
                return delegate.argmax()
        else:
            result = nanops.nanargmax(delegate, skipna=skipna)
            if result == -1:
                warnings.warn(
                    f"The behavior of {type(self).__name__}.argmax/argmin "
                    "with skipna=False and NAs, or with all-NAs is deprecated. "
                    "In a future version this will raise ValueError.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            # error: Incompatible return value type (got "Union[int, ndarray]", expected
            # "int")
            return result  # type: ignore[return-value]

    @doc(argmax, op="min", oppose="max", value="smallest")
    def argmin(
        self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
    ) -> int:
        delegate = self._values
        nv.validate_minmax_axis(axis)
        skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)

        if isinstance(delegate, ExtensionArray):
            if not skipna and delegate.isna().any():
                warnings.warn(
                    f"The behavior of {type(self).__name__}.argmax/argmin "
                    "with skipna=False and NAs, or with all-NAs is deprecated. "
                    "In a future version this will raise ValueError.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                return -1
            else:
                return delegate.argmin()
        else:
            result = nanops.nanargmin(delegate, skipna=skipna)
            if result == -1:
                warnings.warn(
                    f"The behavior of {type(self).__name__}.argmax/argmin "
                    "with skipna=False and NAs, or with all-NAs is deprecated. "
                    "In a future version this will raise ValueError.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            # error: Incompatible return value type (got "Union[int, ndarray]", expected
            # "int")
            return result  # type: ignore[return-value]

    def tolist(self):
        """
        Return a list of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)

        Returns
        -------
        list

        See Also
        --------
        numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
            nested list of Python scalars.

        Examples
        --------
        For Series

        >>> s = pd.Series([1, 2, 3])
        >>> s.to_list()
        [1, 2, 3]

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')

        >>> idx.to_list()
        [1, 2, 3]
        """
        return self._values.tolist()

    to_list = tolist

    def __iter__(self) -> Iterator:
        """
        Return an iterator of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)

        Returns
        -------
        iterator

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> for x in s:
        ...     print(x)
        1
        2
        3
        """
        # We are explicitly making element iterators.
        if not isinstance(self._values, np.ndarray):
            # Check type instead of dtype to catch DTA/TDA
            return iter(self._values)
        else:
            return map(self._values.item, range(self._values.size))

    @cache_readonly
    def hasnans(self) -> bool:
        """
        Return True if there are any NaNs.

        Enables various performance speedups.

        Returns
        -------
        bool

        Examples
        --------
        >>> s = pd.Series([1, 2, 3, None])
        >>> s
        0    1.0
        1    2.0
        2    3.0
        3    NaN
        dtype: float64
        >>> s.hasnans
        True
        """
        # error: Item "bool" of "Union[bool, ndarray[Any, dtype[bool_]], NDFrame]"
        # has no attribute "any"
        return bool(isna(self).any())  # type: ignore[union-attr]

    @final
    def _map_values(self, mapper, na_action=None, convert: bool = True):
        """
        An internal function that maps values using the input
        correspondence (which can be a dict, Series, or function).

        Parameters
        ----------
        mapper : function, dict, or Series
            The input correspondence object
        na_action : {None, 'ignore'}
            If 'ignore', propagate NA values, without passing them to the
            mapping function
        convert : bool, default True
            Try to find better dtype for elementwise function results. If
            False, leave as dtype=object. Note that the dtype is always
            preserved for some extension array dtypes, such as Categorical.

        Returns
        -------
        Union[Index, MultiIndex], inferred
            The output of the mapping function applied to the index.
            If the function returns a tuple with more than one element
            a MultiIndex will be returned.
        """
        arr = self._values

        if isinstance(arr, ExtensionArray):
            return arr.map(mapper, na_action=na_action)

        return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert)

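
    # Illustration (sketch, not part of the original module): the public
    # entry point for ``_map_values`` is ``Series.map`` / ``Index.map``;
    # ``na_action="ignore"`` propagates NA values without calling the mapper
    # on them.
    #
    #     >>> import numpy as np, pandas as pd
    #     >>> pd.Series(["cat", "dog", np.nan]).map(len, na_action="ignore")
    #     0    3.0
    #     1    3.0
    #     2    NaN
    #     dtype: float64
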

    @final
    def value_counts(
        self,
        normalize: bool = False,
        sort: bool = True,
        ascending: bool = False,
        bins=None,
        dropna: bool = True,
    ) -> Series:
        """
        Return a Series containing counts of unique values.

        The resulting object will be in descending order so that the
        first element is the most frequently-occurring element.
        Excludes NA values by default.

        Parameters
        ----------
        normalize : bool, default False
            If True then the object returned will contain the relative
            frequencies of the unique values.
        sort : bool, default True
            Sort by frequencies when True. Preserve the order of the data when False.
        ascending : bool, default False
            Sort in ascending order.
        bins : int, optional
            Rather than count values, group them into half-open bins,
            a convenience for ``pd.cut``, only works with numeric data.
        dropna : bool, default True
            Don't include counts of NaN.

        Returns
        -------
        Series

        See Also
        --------
        Series.count: Number of non-NA elements in a Series.
        DataFrame.count: Number of non-NA elements in a DataFrame.
        DataFrame.value_counts: Equivalent method on DataFrames.

        Examples
        --------
        >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
        >>> index.value_counts()
        3.0    2
        1.0    1
        2.0    1
        4.0    1
        Name: count, dtype: int64

        With `normalize` set to `True`, returns the relative frequency by
        dividing all values by the sum of values.

        >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
        >>> s.value_counts(normalize=True)
        3.0    0.4
        1.0    0.2
        2.0    0.2
        4.0    0.2
        Name: proportion, dtype: float64

        **bins**

        Bins can be useful for going from a continuous variable to a
        categorical variable; instead of counting unique
        occurrences of values, divide the index into the specified
        number of half-open bins.

        >>> s.value_counts(bins=3)
        (0.996, 2.0]    2
        (2.0, 3.0]      2
        (3.0, 4.0]      1
        Name: count, dtype: int64

        **dropna**

        With `dropna` set to `False` we can also see NaN index values.

        >>> s.value_counts(dropna=False)
        3.0    2
        1.0    1
        2.0    1
        4.0    1
        NaN    1
        Name: count, dtype: int64
        """
        return algorithms.value_counts_internal(
            self,
            sort=sort,
            ascending=ascending,
            normalize=normalize,
            bins=bins,
            dropna=dropna,
        )

    def unique(self):
        values = self._values
        if not isinstance(values, np.ndarray):
            # i.e. ExtensionArray
            result = values.unique()
        else:
            result = algorithms.unique1d(values)
        return result

    @final
    def nunique(self, dropna: bool = True) -> int:
        """
        Return number of unique elements in the object.

        Excludes NA values by default.

        Parameters
        ----------
        dropna : bool, default True
            Don't include NaN in the count.

        Returns
        -------
        int

        See Also
        --------
        DataFrame.nunique: Method nunique for DataFrame.
        Series.count: Count non-NA/null observations in the Series.

        Examples
        --------
        >>> s = pd.Series([1, 3, 5, 7, 7])
        >>> s
        0    1
        1    3
        2    5
        3    7
        4    7
        dtype: int64

        >>> s.nunique()
        4
        """
        uniqs = self.unique()
        if dropna:
            uniqs = remove_na_arraylike(uniqs)
        return len(uniqs)

    @property
    def is_unique(self) -> bool:
        """
        Return boolean if values in the object are unique.

        Returns
        -------
        bool

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.is_unique
        True

        >>> s = pd.Series([1, 2, 3, 1])
        >>> s.is_unique
        False
        """
        return self.nunique(dropna=False) == len(self)

    @property
    def is_monotonic_increasing(self) -> bool:
        """
        Return boolean if values in the object are monotonically increasing.

        Returns
        -------
        bool

        Examples
        --------
        >>> s = pd.Series([1, 2, 2])
        >>> s.is_monotonic_increasing
        True

        >>> s = pd.Series([3, 2, 1])
        >>> s.is_monotonic_increasing
        False
        """
        from pandas import Index

        return Index(self).is_monotonic_increasing

    @property
    def is_monotonic_decreasing(self) -> bool:
        """
        Return boolean if values in the object are monotonically decreasing.

        Returns
        -------
        bool

        Examples
        --------
        >>> s = pd.Series([3, 2, 2, 1])
        >>> s.is_monotonic_decreasing
        True

        >>> s = pd.Series([1, 2, 3])
        >>> s.is_monotonic_decreasing
        False
        """
        from pandas import Index

        return Index(self).is_monotonic_decreasing

    @final
    def _memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of the values.

        Parameters
        ----------
        deep : bool, default False
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption.

        Returns
        -------
        bytes used

        See Also
        --------
        numpy.ndarray.nbytes : Total bytes consumed by the elements of the
            array.

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False or if used on PyPy

        Examples
        --------
        >>> idx = pd.Index([1, 2, 3])
        >>> idx.memory_usage()
        24
        """
        if hasattr(self.array, "memory_usage"):
            return self.array.memory_usage(  # pyright: ignore[reportGeneralTypeIssues]
                deep=deep,
            )

        v = self.array.nbytes
        if deep and is_object_dtype(self.dtype) and not PYPY:
            values = cast(np.ndarray, self._values)
            v += lib.memory_usage_of_objects(values)
        return v

1177 |
+
@doc(
|
1178 |
+
algorithms.factorize,
|
1179 |
+
values="",
|
1180 |
+
order="",
|
1181 |
+
size_hint="",
|
1182 |
+
sort=textwrap.dedent(
|
1183 |
+
"""\
|
1184 |
+
sort : bool, default False
|
1185 |
+
Sort `uniques` and shuffle `codes` to maintain the
|
1186 |
+
relationship.
|
1187 |
+
"""
|
1188 |
+
),
|
1189 |
+
)
|
1190 |
+
def factorize(
|
1191 |
+
self,
|
1192 |
+
sort: bool = False,
|
1193 |
+
use_na_sentinel: bool = True,
|
1194 |
+
) -> tuple[npt.NDArray[np.intp], Index]:
|
1195 |
+
codes, uniques = algorithms.factorize(
|
1196 |
+
self._values, sort=sort, use_na_sentinel=use_na_sentinel
|
1197 |
+
)
|
1198 |
+
if uniques.dtype == np.float16:
|
1199 |
+
uniques = uniques.astype(np.float32)
|
1200 |
+
|
1201 |
+
if isinstance(self, ABCIndex):
|
1202 |
+
# preserve e.g. MultiIndex
|
1203 |
+
uniques = self._constructor(uniques)
|
1204 |
+
else:
|
1205 |
+
from pandas import Index
|
1206 |
+
|
1207 |
+
uniques = Index(uniques)
|
1208 |
+
return codes, uniques
|
1209 |
+
|
1210 |
+
_shared_docs[
|
1211 |
+
"searchsorted"
|
1212 |
+
] = """
|
1213 |
+
Find indices where elements should be inserted to maintain order.
|
1214 |
+
|
1215 |
+
Find the indices into a sorted {klass} `self` such that, if the
|
1216 |
+
corresponding elements in `value` were inserted before the indices,
|
1217 |
+
the order of `self` would be preserved.
|
1218 |
+
|
1219 |
+
.. note::
|
1220 |
+
|
1221 |
+
The {klass} *must* be monotonically sorted, otherwise
|
1222 |
+
wrong locations will likely be returned. Pandas does *not*
|
1223 |
+
check this for you.
|
1224 |
+
|
1225 |
+
Parameters
|
1226 |
+
----------
|
1227 |
+
value : array-like or scalar
|
1228 |
+
Values to insert into `self`.
|
1229 |
+
side : {{'left', 'right'}}, optional
|
1230 |
+
If 'left', the index of the first suitable location found is given.
|
1231 |
+
If 'right', return the last such index. If there is no suitable
|
1232 |
+
index, return either 0 or N (where N is the length of `self`).
|
1233 |
+
sorter : 1-D array-like, optional
|
1234 |
+
Optional array of integer indices that sort `self` into ascending
|
1235 |
+
order. They are typically the result of ``np.argsort``.
|
1236 |
+
|
1237 |
+
Returns
|
1238 |
+
-------
|
1239 |
+
int or array of int
|
1240 |
+
A scalar or array of insertion points with the
|
1241 |
+
same shape as `value`.
|
1242 |
+
|
1243 |
+
See Also
|
1244 |
+
--------
|
1245 |
+
sort_values : Sort by the values along either axis.
|
1246 |
+
numpy.searchsorted : Similar method from NumPy.
|
1247 |
+
|
1248 |
+
Notes
|
1249 |
+
-----
|
1250 |
+
Binary search is used to find the required insertion points.
|
1251 |
+
|
1252 |
+
Examples
|
1253 |
+
--------
|
1254 |
+
>>> ser = pd.Series([1, 2, 3])
|
1255 |
+
>>> ser
|
1256 |
+
0 1
|
1257 |
+
1 2
|
1258 |
+
2 3
|
1259 |
+
dtype: int64
|
1260 |
+
|
1261 |
+
>>> ser.searchsorted(4)
|
1262 |
+
3
|
1263 |
+
|
1264 |
+
>>> ser.searchsorted([0, 4])
|
1265 |
+
array([0, 3])
|
1266 |
+
|
1267 |
+
>>> ser.searchsorted([1, 3], side='left')
|
1268 |
+
array([0, 2])
|
1269 |
+
|
1270 |
+
>>> ser.searchsorted([1, 3], side='right')
|
1271 |
+
array([1, 3])
|
1272 |
+
|
1273 |
+
>>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
|
1274 |
+
>>> ser
|
1275 |
+
0 2000-03-11
|
1276 |
+
1 2000-03-12
|
1277 |
+
2 2000-03-13
|
1278 |
+
dtype: datetime64[ns]
|
1279 |
+
|
1280 |
+
>>> ser.searchsorted('3/14/2000')
|
1281 |
+
3
|
1282 |
+
|
1283 |
+
>>> ser = pd.Categorical(
|
1284 |
+
... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
|
1285 |
+
... )
|
1286 |
+
>>> ser
|
1287 |
+
['apple', 'bread', 'bread', 'cheese', 'milk']
|
1288 |
+
Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']
|
1289 |
+
|
1290 |
+
>>> ser.searchsorted('bread')
|
1291 |
+
1
|
1292 |
+
|
1293 |
+
>>> ser.searchsorted(['bread'], side='right')
|
1294 |
+
array([3])
|
1295 |
+
|
1296 |
+
If the values are not monotonically sorted, wrong locations
|
1297 |
+
may be returned:
|
1298 |
+
|
1299 |
+
>>> ser = pd.Series([2, 1, 3])
|
1300 |
+
>>> ser
|
1301 |
+
0 2
|
1302 |
+
1 1
|
1303 |
+
2 3
|
1304 |
+
dtype: int64
|
1305 |
+
|
1306 |
+
>>> ser.searchsorted(1) # doctest: +SKIP
|
1307 |
+
0 # wrong result, correct would be 1
|
1308 |
+
"""
|
1309 |
+
|
1310 |
+
# This overload is needed so that the call to searchsorted in
|
1311 |
+
# pandas.core.resample.TimeGrouper._get_period_bins picks the correct result
|
1312 |
+
|
1313 |
+
# error: Overloaded function signatures 1 and 2 overlap with incompatible
|
1314 |
+
# return types
|
1315 |
+
@overload
|
1316 |
+
def searchsorted( # type: ignore[overload-overlap]
|
1317 |
+
self,
|
1318 |
+
value: ScalarLike_co,
|
1319 |
+
side: Literal["left", "right"] = ...,
|
1320 |
+
sorter: NumpySorter = ...,
|
1321 |
+
) -> np.intp:
|
1322 |
+
...
|
1323 |
+
|
1324 |
+
@overload
|
1325 |
+
def searchsorted(
|
1326 |
+
self,
|
1327 |
+
value: npt.ArrayLike | ExtensionArray,
|
1328 |
+
side: Literal["left", "right"] = ...,
|
1329 |
+
sorter: NumpySorter = ...,
|
1330 |
+
) -> npt.NDArray[np.intp]:
|
1331 |
+
...
|
1332 |
+
|
1333 |
+
@doc(_shared_docs["searchsorted"], klass="Index")
|
1334 |
+
def searchsorted(
|
1335 |
+
self,
|
1336 |
+
value: NumpyValueArrayLike | ExtensionArray,
|
1337 |
+
side: Literal["left", "right"] = "left",
|
1338 |
+
sorter: NumpySorter | None = None,
|
1339 |
+
) -> npt.NDArray[np.intp] | np.intp:
|
1340 |
+
if isinstance(value, ABCDataFrame):
|
1341 |
+
msg = (
|
1342 |
+
"Value must be 1-D array-like or scalar, "
|
1343 |
+
f"{type(value).__name__} is not supported"
|
1344 |
+
)
|
1345 |
+
raise ValueError(msg)
|
1346 |
+
|
1347 |
+
values = self._values
|
1348 |
+
if not isinstance(values, np.ndarray):
|
1349 |
+
# Going through EA.searchsorted directly improves performance GH#38083
|
1350 |
+
return values.searchsorted(value, side=side, sorter=sorter)
|
1351 |
+
|
1352 |
+
return algorithms.searchsorted(
|
1353 |
+
values,
|
1354 |
+
value,
|
1355 |
+
side=side,
|
1356 |
+
sorter=sorter,
|
1357 |
+
)
|
1358 |
+
|
1359 |
+
def drop_duplicates(self, *, keep: DropKeep = "first"):
|
1360 |
+
duplicated = self._duplicated(keep=keep)
|
1361 |
+
# error: Value of type "IndexOpsMixin" is not indexable
|
1362 |
+
return self[~duplicated] # type: ignore[index]
|
1363 |
+
|
1364 |
+
@final
|
1365 |
+
def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
|
1366 |
+
arr = self._values
|
1367 |
+
if isinstance(arr, ExtensionArray):
|
1368 |
+
return arr.duplicated(keep=keep)
|
1369 |
+
return algorithms.duplicated(arr, keep=keep)
|
1370 |
+
|
1371 |
+
def _arith_method(self, other, op):
|
1372 |
+
res_name = ops.get_op_result_name(self, other)
|
1373 |
+
|
1374 |
+
lvalues = self._values
|
1375 |
+
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
|
1376 |
+
rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)
|
1377 |
+
rvalues = ensure_wrapped_if_datetimelike(rvalues)
|
1378 |
+
if isinstance(rvalues, range):
|
1379 |
+
rvalues = np.arange(rvalues.start, rvalues.stop, rvalues.step)
|
1380 |
+
|
1381 |
+
with np.errstate(all="ignore"):
|
1382 |
+
result = ops.arithmetic_op(lvalues, rvalues, op)
|
1383 |
+
|
1384 |
+
return self._construct_result(result, name=res_name)
|
1385 |
+
|
1386 |
+
def _construct_result(self, result, name):
|
1387 |
+
"""
|
1388 |
+
Construct an appropriately-wrapped result from the ArrayLike result
|
1389 |
+
of an arithmetic-like operation.
|
1390 |
+
"""
|
1391 |
+
raise AbstractMethodError(self)
|
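The IndexOpsMixin methods above back the public Series/Index API. As a quick sanity check of the documented behaviour, a minimal sketch from the public side (assumes only pandas and numpy are installed; this snippet is illustrative and not part of the diff):

# Sketch: exercising the IndexOpsMixin methods defined above via pd.Series.
import numpy as np
import pandas as pd

s = pd.Series([3, 1, 2, 3, 4, np.nan])

# value_counts dispatches to algorithms.value_counts_internal
print(s.value_counts(dropna=False))   # NaN appears as its own row

# nunique drops NA by default; is_unique compares nunique(dropna=False) to len
print(s.nunique(), s.nunique(dropna=False), s.is_unique)

# searchsorted requires monotonically sorted values; pandas does not check this
sorted_s = s.dropna().sort_values(ignore_index=True)
print(sorted_s.searchsorted([0, 4]))  # insertion points as an np.intp array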
llmeval-env/lib/python3.10/site-packages/pandas/core/common.py
ADDED
@@ -0,0 +1,657 @@
"""
Misc tools for implementing data structures

Note: pandas.core.common is *not* part of the public API.
"""
from __future__ import annotations

import builtins
from collections import (
    abc,
    defaultdict,
)
from collections.abc import (
    Collection,
    Generator,
    Hashable,
    Iterable,
    Sequence,
)
import contextlib
from functools import partial
import inspect
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    cast,
    overload,
)
import warnings

import numpy as np

from pandas._libs import lib
from pandas.compat.numpy import np_version_gte1p24

from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
    is_bool_dtype,
    is_integer,
)
from pandas.core.dtypes.generic import (
    ABCExtensionArray,
    ABCIndex,
    ABCMultiIndex,
    ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string

if TYPE_CHECKING:
    from pandas._typing import (
        AnyArrayLike,
        ArrayLike,
        NpDtype,
        RandomState,
        T,
    )

    from pandas import Index


def flatten(line):
    """
    Flatten an arbitrarily nested sequence.

    Parameters
    ----------
    line : sequence
        The non-string sequence to flatten.

    Notes
    -----
    This doesn't treat strings as sequences.

    Returns
    -------
    flattened : generator
    """
    for element in line:
        if iterable_not_string(element):
            yield from flatten(element)
        else:
            yield element


def consensus_name_attr(objs):
    name = objs[0].name
    for obj in objs[1:]:
        try:
            if obj.name != name:
                name = None
        except ValueError:
            name = None
    return name


def is_bool_indexer(key: Any) -> bool:
    """
    Check whether `key` is a valid boolean indexer.

    Parameters
    ----------
    key : Any
        Only list-likes may be considered boolean indexers.
        All other types are not considered a boolean indexer.
        For array-like input, boolean ndarrays or ExtensionArrays
        with ``_is_boolean`` set are considered boolean indexers.

    Returns
    -------
    bool
        Whether `key` is a valid boolean indexer.

    Raises
    ------
    ValueError
        When the array is an object-dtype ndarray or ExtensionArray
        and contains missing values.

    See Also
    --------
    check_array_indexer : Check that `key` is a valid array to index,
        and convert to an ndarray.
    """
    if isinstance(
        key, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)
    ) and not isinstance(key, ABCMultiIndex):
        if key.dtype == np.object_:
            key_array = np.asarray(key)

            if not lib.is_bool_array(key_array):
                na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
                if lib.is_bool_array(key_array, skipna=True):
                    # Don't raise on e.g. ["A", "B", np.nan], see
                    # test_loc_getitem_list_of_labels_categoricalindex_with_na
                    raise ValueError(na_msg)
                return False
            return True
        elif is_bool_dtype(key.dtype):
            return True
    elif isinstance(key, list):
        # check if np.array(key).dtype would be bool
        if len(key) > 0:
            if type(key) is not list:  # noqa: E721
                # GH#42461 cython will raise TypeError if we pass a subclass
                key = list(key)
            return lib.is_bool_list(key)

    return False


def cast_scalar_indexer(val):
    """
    Disallow indexing with a float key, even if that key is a round number.

    Parameters
    ----------
    val : scalar

    Returns
    -------
    outval : scalar
    """
    # assumes lib.is_scalar(val)
    if lib.is_float(val) and val.is_integer():
        raise IndexError(
            # GH#34193
            "Indexing with a float is no longer supported. Manually convert "
            "to an integer key instead."
        )
    return val


def not_none(*args):
    """
    Returns a generator consisting of the arguments that are not None.
    """
    return (arg for arg in args if arg is not None)


def any_none(*args) -> bool:
    """
    Returns a boolean indicating if any argument is None.
    """
    return any(arg is None for arg in args)


def all_none(*args) -> bool:
    """
    Returns a boolean indicating if all arguments are None.
    """
    return all(arg is None for arg in args)


def any_not_none(*args) -> bool:
    """
    Returns a boolean indicating if any argument is not None.
    """
    return any(arg is not None for arg in args)


def all_not_none(*args) -> bool:
    """
    Returns a boolean indicating if all arguments are not None.
    """
    return all(arg is not None for arg in args)


def count_not_none(*args) -> int:
    """
    Returns the count of arguments that are not None.
    """
    return sum(x is not None for x in args)


@overload
def asarray_tuplesafe(
    values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ...
) -> np.ndarray:
    # ExtensionArray can only be returned when values is an Index, all other iterables
    # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type
    # signature, so instead we special-case some common types.
    ...


@overload
def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
    ...


def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
    if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
        values = list(values)
    elif isinstance(values, ABCIndex):
        return values._values
    elif isinstance(values, ABCSeries):
        return values._values

    if isinstance(values, list) and dtype in [np.object_, object]:
        return construct_1d_object_array_from_listlike(values)

    try:
        with warnings.catch_warnings():
            # Can remove warning filter once NumPy 1.24 is min version
            if not np_version_gte1p24:
                warnings.simplefilter("ignore", np.VisibleDeprecationWarning)
            result = np.asarray(values, dtype=dtype)
    except ValueError:
        # Using try/except since it's more performant than checking is_list_like
        # over each element
        # error: Argument 1 to "construct_1d_object_array_from_listlike"
        # has incompatible type "Iterable[Any]"; expected "Sized"
        return construct_1d_object_array_from_listlike(values)  # type: ignore[arg-type]

    if issubclass(result.dtype.type, str):
        result = np.asarray(values, dtype=object)

    if result.ndim == 2:
        # Avoid building an array of arrays:
        values = [tuple(x) for x in values]
        result = construct_1d_object_array_from_listlike(values)

    return result


def index_labels_to_array(
    labels: np.ndarray | Iterable, dtype: NpDtype | None = None
) -> np.ndarray:
    """
    Transform label or iterable of labels to array, for use in Index.

    Parameters
    ----------
    dtype : dtype
        If specified, use as dtype of the resulting array, otherwise infer.

    Returns
    -------
    array
    """
    if isinstance(labels, (str, tuple)):
        labels = [labels]

    if not isinstance(labels, (list, np.ndarray)):
        try:
            labels = list(labels)
        except TypeError:  # non-iterable
            labels = [labels]

    labels = asarray_tuplesafe(labels, dtype=dtype)

    return labels


def maybe_make_list(obj):
    if obj is not None and not isinstance(obj, (tuple, list)):
        return [obj]
    return obj


def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T:
    """
    If obj is Iterable but not list-like, consume into list.
    """
    if isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized):
        return list(obj)
    obj = cast(Collection, obj)
    return obj


def is_null_slice(obj) -> bool:
    """
    We have a null slice.
    """
    return (
        isinstance(obj, slice)
        and obj.start is None
        and obj.stop is None
        and obj.step is None
    )


def is_empty_slice(obj) -> bool:
    """
    We have an empty slice, e.g. no values are selected.
    """
    return (
        isinstance(obj, slice)
        and obj.start is not None
        and obj.stop is not None
        and obj.start == obj.stop
    )


def is_true_slices(line) -> list[bool]:
    """
    Find non-trivial slices in "line": return a list of booleans with same length.
    """
    return [isinstance(k, slice) and not is_null_slice(k) for k in line]


# TODO: used only once in indexing; belongs elsewhere?
def is_full_slice(obj, line: int) -> bool:
    """
    We have a full length slice.
    """
    return (
        isinstance(obj, slice)
        and obj.start == 0
        and obj.stop == line
        and obj.step is None
    )


def get_callable_name(obj):
    # typical case has name
    if hasattr(obj, "__name__"):
        return getattr(obj, "__name__")
    # some objects don't; could recurse
    if isinstance(obj, partial):
        return get_callable_name(obj.func)
    # fall back to class name
    if callable(obj):
        return type(obj).__name__
    # everything failed (probably because the argument
    # wasn't actually callable); we return None
    # instead of the empty string in this case to allow
    # distinguishing between no name and a name of ''
    return None


def apply_if_callable(maybe_callable, obj, **kwargs):
    """
    Evaluate possibly callable input using obj and kwargs if it is callable,
    otherwise return as it is.

    Parameters
    ----------
    maybe_callable : possibly a callable
    obj : NDFrame
    **kwargs
    """
    if callable(maybe_callable):
        return maybe_callable(obj, **kwargs)

    return maybe_callable


def standardize_mapping(into):
    """
    Helper function to standardize a supplied mapping.

    Parameters
    ----------
    into : instance or subclass of collections.abc.Mapping
        Must be a class, an initialized collections.defaultdict,
        or an instance of a collections.abc.Mapping subclass.

    Returns
    -------
    mapping : a collections.abc.Mapping subclass or other constructor
        a callable object that can accept an iterator to create
        the desired Mapping.

    See Also
    --------
    DataFrame.to_dict
    Series.to_dict
    """
    if not inspect.isclass(into):
        if isinstance(into, defaultdict):
            return partial(defaultdict, into.default_factory)
        into = type(into)
    if not issubclass(into, abc.Mapping):
        raise TypeError(f"unsupported type: {into}")
    if into == defaultdict:
        raise TypeError("to_dict() only accepts initialized defaultdicts")
    return into


@overload
def random_state(state: np.random.Generator) -> np.random.Generator:
    ...


@overload
def random_state(
    state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None,
) -> np.random.RandomState:
    ...


def random_state(state: RandomState | None = None):
    """
    Helper function for processing random_state arguments.

    Parameters
    ----------
    state : int, array-like, BitGenerator, Generator, np.random.RandomState, None.
        If receives an int, array-like, or BitGenerator, passes to
        np.random.RandomState() as seed.
        If receives an np.random RandomState or Generator, just returns that unchanged.
        If receives `None`, returns np.random.
        If receives anything else, raises an informative ValueError.

        Default None.

    Returns
    -------
    np.random.RandomState or np.random.Generator. If state is None, returns np.random

    """
    if is_integer(state) or isinstance(state, (np.ndarray, np.random.BitGenerator)):
        return np.random.RandomState(state)
    elif isinstance(state, np.random.RandomState):
        return state
    elif isinstance(state, np.random.Generator):
        return state
    elif state is None:
        return np.random
    else:
        raise ValueError(
            "random_state must be an integer, array-like, a BitGenerator, Generator, "
            "a numpy RandomState, or None"
        )


def pipe(
    obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs
) -> T:
    """
    Apply a function ``func`` to object ``obj`` either by passing obj as the
    first argument to the function or, in the case that the func is a tuple,
    interpret the first element of the tuple as a function and pass the obj to
    that function as a keyword argument whose key is the value of the second
    element of the tuple.

    Parameters
    ----------
    func : callable or tuple of (callable, str)
        Function to apply to this object or, alternatively, a
        ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
        string indicating the keyword of ``callable`` that expects the
        object.
    *args : iterable, optional
        Positional arguments passed into ``func``.
    **kwargs : dict, optional
        A dictionary of keyword arguments passed into ``func``.

    Returns
    -------
    object : the return type of ``func``.
    """
    if isinstance(func, tuple):
        func, target = func
        if target in kwargs:
            msg = f"{target} is both the pipe target and a keyword argument"
            raise ValueError(msg)
        kwargs[target] = obj
        return func(*args, **kwargs)
    else:
        return func(obj, *args, **kwargs)


def get_rename_function(mapper):
    """
    Returns a function that will map names/labels, depending on whether mapper
    is a dict, Series or just a function.
    """

    def f(x):
        if x in mapper:
            return mapper[x]
        else:
            return x

    return f if isinstance(mapper, (abc.Mapping, ABCSeries)) else mapper


def convert_to_list_like(
    values: Hashable | Iterable | AnyArrayLike,
) -> list | AnyArrayLike:
    """
    Convert list-like or scalar input to list-like. List, numpy and pandas array-like
    inputs are returned unmodified whereas others are converted to list.
    """
    if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)):
        return values
    elif isinstance(values, abc.Iterable) and not isinstance(values, str):
        return list(values)

    return [values]


@contextlib.contextmanager
def temp_setattr(
    obj, attr: str, value, condition: bool = True
) -> Generator[None, None, None]:
    """
    Temporarily set attribute on an object.

    Parameters
    ----------
    obj : object
        Object whose attribute will be modified.
    attr : str
        Attribute to modify.
    value : Any
        Value to temporarily set attribute to.
    condition : bool, default True
        Whether to set the attribute. Provided in order to not have to
        conditionally use this context manager.

    Yields
    ------
    object : obj with modified attribute.
    """
    if condition:
        old_value = getattr(obj, attr)
        setattr(obj, attr, value)
    try:
        yield obj
    finally:
        if condition:
            setattr(obj, attr, old_value)


def require_length_match(data, index: Index) -> None:
    """
    Check the length of data matches the length of the index.
    """
    if len(data) != len(index):
        raise ValueError(
            "Length of values "
            f"({len(data)}) "
            "does not match length of index "
            f"({len(index)})"
        )


# the ufuncs np.maximum.reduce and np.minimum.reduce default to axis=0,
# whereas np.min and np.max (which directly call obj.min and obj.max)
# default to axis=None.
_builtin_table = {
    builtins.sum: np.sum,
    builtins.max: np.maximum.reduce,
    builtins.min: np.minimum.reduce,
}

# GH#53425: Only for deprecation
_builtin_table_alias = {
    builtins.sum: "np.sum",
    builtins.max: "np.maximum.reduce",
    builtins.min: "np.minimum.reduce",
}

_cython_table = {
    builtins.sum: "sum",
    builtins.max: "max",
    builtins.min: "min",
    np.all: "all",
    np.any: "any",
    np.sum: "sum",
    np.nansum: "sum",
    np.mean: "mean",
    np.nanmean: "mean",
    np.prod: "prod",
    np.nanprod: "prod",
    np.std: "std",
    np.nanstd: "std",
    np.var: "var",
    np.nanvar: "var",
    np.median: "median",
    np.nanmedian: "median",
    np.max: "max",
    np.nanmax: "max",
    np.min: "min",
    np.nanmin: "min",
    np.cumprod: "cumprod",
    np.nancumprod: "cumprod",
    np.cumsum: "cumsum",
    np.nancumsum: "cumsum",
}


def get_cython_func(arg: Callable) -> str | None:
    """
    If we define an internal function for this argument, return it.
    """
    return _cython_table.get(arg)


def is_builtin_func(arg):
    """
    If we define a builtin function for this argument, return it,
    otherwise return the arg.
    """
    return _builtin_table.get(arg, arg)


def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:
    """
    If a name is missing then replace it by level_n, where n is the count.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    names : list-like
        list of column names or None values.

    Returns
    -------
    list
        list of column names with the None values replaced.
    """
    return [f"level_{i}" if name is None else name for i, name in enumerate(names)]
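common.pipe above implements the (callable, data_keyword) convention behind the public DataFrame.pipe / Series.pipe. A minimal sketch of both call forms; `scale` and `fit` are illustrative helpers, not pandas API:

# Sketch: the two call forms handled by common.pipe, via the public DataFrame.pipe.
import pandas as pd

def scale(df: pd.DataFrame, factor: float) -> pd.DataFrame:
    return df * factor

def fit(factor: float, data: pd.DataFrame) -> pd.DataFrame:
    # expects the DataFrame as the *keyword* argument `data`
    return data + factor

df = pd.DataFrame({"a": [1, 2, 3]})

# plain form: obj is passed as the first positional argument
out1 = df.pipe(scale, 2.0)

# tuple form: obj is injected as kwargs["data"]; also passing data= explicitly
# would raise "data is both the pipe target and a keyword argument"
out2 = df.pipe((fit, "data"), 10.0)
print(out1, out2, sep="\n")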
llmeval-env/lib/python3.10/site-packages/pandas/core/config_init.py
ADDED
@@ -0,0 +1,924 @@
"""
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. If register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.

If you need to make sure options are available even before a certain
module is imported, register them here rather than in the module.

"""
from __future__ import annotations

import os
from typing import Callable

import pandas._config.config as cf
from pandas._config.config import (
    is_bool,
    is_callable,
    is_instance_factory,
    is_int,
    is_nonnegative_int,
    is_one_of_factory,
    is_str,
    is_text,
)

# compute

use_bottleneck_doc = """
: bool
    Use the bottleneck library to accelerate if it is installed,
    the default is True
    Valid values: False,True
"""


def use_bottleneck_cb(key) -> None:
    from pandas.core import nanops

    nanops.set_use_bottleneck(cf.get_option(key))


use_numexpr_doc = """
: bool
    Use the numexpr library to accelerate computation if it is installed,
    the default is True
    Valid values: False,True
"""


def use_numexpr_cb(key) -> None:
    from pandas.core.computation import expressions

    expressions.set_use_numexpr(cf.get_option(key))


use_numba_doc = """
: bool
    Use the numba engine option for select operations if it is installed,
    the default is False
    Valid values: False,True
"""


def use_numba_cb(key) -> None:
    from pandas.core.util import numba_

    numba_.set_use_numba(cf.get_option(key))


with cf.config_prefix("compute"):
    cf.register_option(
        "use_bottleneck",
        True,
        use_bottleneck_doc,
        validator=is_bool,
        cb=use_bottleneck_cb,
    )
    cf.register_option(
        "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb
    )
    cf.register_option(
        "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb
    )
#
# options from the "display" namespace

pc_precision_doc = """
: int
    Floating point output precision in terms of number of places after the
    decimal, for regular formatting as well as scientific notation. Similar
    to ``precision`` in :meth:`numpy.set_printoptions`.
"""

pc_colspace_doc = """
: int
    Default space for DataFrame columns.
"""

pc_max_rows_doc = """
: int
    If max_rows is exceeded, switch to truncate view. Depending on
    `large_repr`, objects are either centrally truncated or printed as
    a summary view. 'None' value means unlimited.

    In case python/IPython is running in a terminal and `large_repr`
    equals 'truncate' this can be set to 0 and pandas will auto-detect
    the height of the terminal and print a truncated object which fits
    the screen height. The IPython notebook, IPython qtconsole, or
    IDLE do not run in a terminal and hence it is not possible to do
    correct auto-detection.
"""

pc_min_rows_doc = """
: int
    The number of rows to show in a truncated view (when `max_rows` is
    exceeded). Ignored when `max_rows` is set to None or 0. When set to
    None, follows the value of `max_rows`.
"""

pc_max_cols_doc = """
: int
    If max_cols is exceeded, switch to truncate view. Depending on
    `large_repr`, objects are either centrally truncated or printed as
    a summary view. 'None' value means unlimited.

    In case python/IPython is running in a terminal and `large_repr`
    equals 'truncate' this can be set to 0 or None and pandas will auto-detect
    the width of the terminal and print a truncated object which fits
    the screen width. The IPython notebook, IPython qtconsole, or IDLE
    do not run in a terminal and hence it is not possible to do
    correct auto-detection and defaults to 20.
"""

pc_max_categories_doc = """
: int
    This sets the maximum number of categories pandas should output when
    printing out a `Categorical` or a Series of dtype "category".
"""

pc_max_info_cols_doc = """
: int
    max_info_columns is used in DataFrame.info method to decide if
    per column information will be printed.
"""

pc_nb_repr_h_doc = """
: boolean
    When True, IPython notebook will use html representation for
    pandas objects (if it is available).
"""

pc_pprint_nest_depth = """
: int
    Controls the number of nested levels to process when pretty-printing
"""

pc_multi_sparse_doc = """
: boolean
    "sparsify" MultiIndex display (don't display repeated
    elements in outer levels within groups)
"""

float_format_doc = """
: callable
    The callable should accept a floating point number and return
    a string with the desired format of the number. This is used
    in some places like SeriesFormatter.
    See formats.format.EngFormatter for an example.
"""

max_colwidth_doc = """
: int or None
    The maximum width in characters of a column in the repr of
    a pandas data structure. When the column overflows, a "..."
    placeholder is embedded in the output. A 'None' value means unlimited.
"""

colheader_justify_doc = """
: 'left'/'right'
    Controls the justification of column headers. Used by DataFrameFormatter.
"""

pc_expand_repr_doc = """
: boolean
    Whether to print out the full DataFrame repr for wide DataFrames across
    multiple lines, `max_columns` is still respected, but the output will
    wrap-around across multiple "pages" if its width exceeds `display.width`.
"""

pc_show_dimensions_doc = """
: boolean or 'truncate'
    Whether to print out dimensions at the end of DataFrame repr.
    If 'truncate' is specified, only print out the dimensions if the
    frame is truncated (e.g. not display all rows and/or columns)
"""

pc_east_asian_width_doc = """
: boolean
    Whether to use the Unicode East Asian Width to calculate the display text
    width.
    Enabling this may affect the performance (default: False)
"""

pc_ambiguous_as_wide_doc = """
: boolean
    Whether to handle Unicode characters belonging to Ambiguous as Wide (width=2)
    (default: False)
"""

pc_table_schema_doc = """
: boolean
    Whether to publish a Table Schema representation for frontends
    that support it.
    (default: False)
"""

pc_html_border_doc = """
: int
    A ``border=value`` attribute is inserted in the ``<table>`` tag
    for the DataFrame HTML repr.
"""

pc_html_use_mathjax_doc = """\
: boolean
    When True, Jupyter notebook will process table contents using MathJax,
    rendering mathematical expressions enclosed by the dollar symbol.
    (default: True)
"""

pc_max_dir_items = """\
: int
    The number of items that will be added to `dir(...)`. 'None' value means
    unlimited. Because dir is cached, changing this option will not immediately
    affect already existing dataframes until a column is deleted or added.

    This is for instance used to suggest columns from a dataframe to tab
    completion.
"""

pc_width_doc = """
: int
    Width of the display in characters. In case python/IPython is running in
    a terminal this can be set to None and pandas will correctly auto-detect
    the width.
    Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
    terminal and hence it is not possible to correctly detect the width.
"""

pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
    will be displayed as exactly 0 by repr and friends.
"""

pc_max_seq_items = """
: int or None
    When pretty-printing a long sequence, no more than `max_seq_items`
    will be printed. If items are omitted, they will be denoted by the
    addition of "..." to the resulting string.

    If set to None, the number of items to be printed is unlimited.
"""

pc_max_info_rows_doc = """
: int
    df.info() will usually show null-counts for each column.
    For large frames this can be quite slow. max_info_rows and max_info_cols
    limit this null check only to frames with smaller dimensions than
    specified.
"""

pc_large_repr_doc = """
: 'truncate'/'info'
    For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
    show a truncated table, or switch to the view from
    df.info() (the behaviour in earlier versions of pandas).
"""

pc_memory_usage_doc = """
: bool, string or None
    This specifies if the memory usage of a DataFrame should be displayed when
    df.info() is called. Valid values True,False,'deep'
"""


def table_schema_cb(key) -> None:
    from pandas.io.formats.printing import enable_data_resource_formatter

    enable_data_resource_formatter(cf.get_option(key))


def is_terminal() -> bool:
    """
    Detect if Python is running in a terminal.

    Returns True if Python is running in a terminal or False if not.
    """
    try:
        # error: Name 'get_ipython' is not defined
        ip = get_ipython()  # type: ignore[name-defined]
    except NameError:  # assume standard Python interpreter in a terminal
        return True
    else:
        if hasattr(ip, "kernel"):  # IPython as a Jupyter kernel
            return False
        else:  # IPython in a terminal
            return True


with cf.config_prefix("display"):
    cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int)
    cf.register_option(
        "float_format",
        None,
        float_format_doc,
        validator=is_one_of_factory([None, is_callable]),
    )
    cf.register_option(
        "max_info_rows",
        1690785,
        pc_max_info_rows_doc,
        validator=is_int,
    )
    cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int)
    cf.register_option(
        "min_rows",
        10,
        pc_min_rows_doc,
        validator=is_instance_factory([type(None), int]),
    )
    cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)

    cf.register_option(
        "max_colwidth",
        50,
        max_colwidth_doc,
        validator=is_nonnegative_int,
    )
    if is_terminal():
        max_cols = 0  # automatically determine optimal number of columns
    else:
        max_cols = 20  # cannot determine optimal number of columns
    cf.register_option(
        "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int
    )
    cf.register_option(
        "large_repr",
        "truncate",
        pc_large_repr_doc,
        validator=is_one_of_factory(["truncate", "info"]),
    )
    cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int)
    cf.register_option(
        "colheader_justify", "right", colheader_justify_doc, validator=is_text
    )
    cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool)
    cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int)
    cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool)
    cf.register_option("expand_frame_repr", True, pc_expand_repr_doc)
    cf.register_option(
        "show_dimensions",
        "truncate",
        pc_show_dimensions_doc,
        validator=is_one_of_factory([True, False, "truncate"]),
    )
    cf.register_option("chop_threshold", None, pc_chop_threshold_doc)
    cf.register_option("max_seq_items", 100, pc_max_seq_items)
    cf.register_option(
        "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int])
    )
    cf.register_option(
        "memory_usage",
        True,
        pc_memory_usage_doc,
        validator=is_one_of_factory([None, True, False, "deep"]),
    )
    cf.register_option(
        "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool
    )
    cf.register_option(
        "unicode.ambiguous_as_wide", False, pc_ambiguous_as_wide_doc, validator=is_bool
    )
    cf.register_option(
        "html.table_schema",
        False,
        pc_table_schema_doc,
        validator=is_bool,
        cb=table_schema_cb,
    )
    cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int)
    cf.register_option(
        "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool
    )
    cf.register_option(
        "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int
    )

tc_sim_interactive_doc = """
: boolean
    Whether to simulate interactive mode for purposes of testing
"""

with cf.config_prefix("mode"):
    cf.register_option("sim_interactive", False, tc_sim_interactive_doc)

use_inf_as_na_doc = """
: boolean
    True means treat None, NaN, INF, -INF as NA (old way),
    False means None and NaN are null, but INF, -INF are not NA
    (new way).

    This option is deprecated in pandas 2.1.0 and will be removed in 3.0.
"""

# We don't want to start importing everything at the global context level
# or we'll hit circular deps.


def use_inf_as_na_cb(key) -> None:
    # TODO(3.0): enforcing this deprecation will close GH#52501
    from pandas.core.dtypes.missing import _use_inf_as_na

    _use_inf_as_na(key)


with cf.config_prefix("mode"):
    cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb)

cf.deprecate_option(
    # GH#51684
    "mode.use_inf_as_na",
    "use_inf_as_na option is deprecated and will be removed in a future "
    "version. Convert inf values to NaN before operating instead.",
)

data_manager_doc = """
: string
    Internal data manager type; can be "block" or "array". Defaults to "block",
    unless overridden by the 'PANDAS_DATA_MANAGER' environment variable (needs
    to be set before pandas is imported).
"""


with cf.config_prefix("mode"):
    cf.register_option(
        "data_manager",
        # Get the default from an environment variable, if set, otherwise defaults
        # to "block". This environment variable can be set for testing.
        os.environ.get("PANDAS_DATA_MANAGER", "block"),
        data_manager_doc,
        validator=is_one_of_factory(["block", "array"]),
    )

cf.deprecate_option(
    # GH#55043
    "mode.data_manager",
    "data_manager option is deprecated and will be removed in a future "
    "version. Only the BlockManager will be available.",
)


# TODO better name?
copy_on_write_doc = """
: bool
    Use new copy-view behaviour using Copy-on-Write. Defaults to False,
    unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable
    (if set to "1" for True, needs to be set before pandas is imported).
"""


with cf.config_prefix("mode"):
    cf.register_option(
        "copy_on_write",
        # Get the default from an environment variable, if set, otherwise defaults
        # to False. This environment variable can be set for testing.
        "warn"
        if os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "warn"
        else os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1",
        copy_on_write_doc,
        validator=is_one_of_factory([True, False, "warn"]),
    )


# user warnings
chained_assignment = """
: string
    Raise an exception, warn, or no action if trying to use chained assignment,
    The default is warn
"""

with cf.config_prefix("mode"):
    cf.register_option(
        "chained_assignment",
        "warn",
        chained_assignment,
        validator=is_one_of_factory([None, "warn", "raise"]),
    )


string_storage_doc = """
: string
    The default storage for StringDtype. This option is ignored if
    ``future.infer_string`` is set to True.
"""

with cf.config_prefix("mode"):
    cf.register_option(
        "string_storage",
        "python",
        string_storage_doc,
        validator=is_one_of_factory(["python", "pyarrow", "pyarrow_numpy"]),
    )


# Set up the io.excel specific reader configuration.
reader_engine_doc = """
: string
    The default Excel reader engine for '{ext}' files. Available options:
    auto, {others}.
"""

_xls_options = ["xlrd", "calamine"]
_xlsm_options = ["xlrd", "openpyxl", "calamine"]
_xlsx_options = ["xlrd", "openpyxl", "calamine"]
_ods_options = ["odf", "calamine"]
_xlsb_options = ["pyxlsb", "calamine"]


with cf.config_prefix("io.excel.xls"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)),
        validator=is_one_of_factory(_xls_options + ["auto"]),
    )

with cf.config_prefix("io.excel.xlsm"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
        validator=is_one_of_factory(_xlsm_options + ["auto"]),
    )


with cf.config_prefix("io.excel.xlsx"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
        validator=is_one_of_factory(_xlsx_options + ["auto"]),
    )


with cf.config_prefix("io.excel.ods"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
        validator=is_one_of_factory(_ods_options + ["auto"]),
    )

with cf.config_prefix("io.excel.xlsb"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)),
        validator=is_one_of_factory(_xlsb_options + ["auto"]),
    )

# Set up the io.excel specific writer configuration.
writer_engine_doc = """
: string
    The default Excel writer engine for '{ext}' files. Available options:
    auto, {others}.
"""

_xlsm_options = ["openpyxl"]
_xlsx_options = ["openpyxl", "xlsxwriter"]
_ods_options = ["odf"]


with cf.config_prefix("io.excel.xlsm"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
        validator=str,
    )


with cf.config_prefix("io.excel.xlsx"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
        validator=str,
    )


with cf.config_prefix("io.excel.ods"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
        validator=str,
    )


# Set up the io.parquet specific configuration.
parquet_engine_doc = """
: string
    The default parquet reader/writer engine. Available options:
    'auto', 'pyarrow', 'fastparquet', the default is 'auto'
"""
620 |
+
with cf.config_prefix("io.parquet"):
|
621 |
+
cf.register_option(
|
622 |
+
"engine",
|
623 |
+
"auto",
|
624 |
+
parquet_engine_doc,
|
625 |
+
validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]),
|
626 |
+
)
|
627 |
+
|
628 |
+
|
629 |
+
# Set up the io.sql specific configuration.
|
630 |
+
sql_engine_doc = """
|
631 |
+
: string
|
632 |
+
The default sql reader/writer engine. Available options:
|
633 |
+
'auto', 'sqlalchemy', the default is 'auto'
|
634 |
+
"""
|
635 |
+
|
636 |
+
with cf.config_prefix("io.sql"):
|
637 |
+
cf.register_option(
|
638 |
+
"engine",
|
639 |
+
"auto",
|
640 |
+
sql_engine_doc,
|
641 |
+
validator=is_one_of_factory(["auto", "sqlalchemy"]),
|
642 |
+
)
|
643 |
+
|
644 |
+
# --------
|
645 |
+
# Plotting
|
646 |
+
# ---------
|
647 |
+
|
648 |
+
plotting_backend_doc = """
|
649 |
+
: str
|
650 |
+
The plotting backend to use. The default value is "matplotlib", the
|
651 |
+
backend provided with pandas. Other backends can be specified by
|
652 |
+
providing the name of the module that implements the backend.
|
653 |
+
"""
|
654 |
+
|
655 |
+
|
656 |
+
def register_plotting_backend_cb(key) -> None:
|
657 |
+
if key == "matplotlib":
|
658 |
+
# We defer matplotlib validation, since it's the default
|
659 |
+
return
|
660 |
+
from pandas.plotting._core import _get_plot_backend
|
661 |
+
|
662 |
+
_get_plot_backend(key)
|
663 |
+
|
664 |
+
|
665 |
+
with cf.config_prefix("plotting"):
|
666 |
+
cf.register_option(
|
667 |
+
"backend",
|
668 |
+
defval="matplotlib",
|
669 |
+
doc=plotting_backend_doc,
|
670 |
+
validator=register_plotting_backend_cb,
|
671 |
+
)
|
672 |
+
|
673 |
+
|
674 |
+
register_converter_doc = """
|
675 |
+
: bool or 'auto'.
|
676 |
+
Whether to register converters with matplotlib's units registry for
|
677 |
+
dates, times, datetimes, and Periods. Toggling to False will remove
|
678 |
+
the converters, restoring any converters that pandas overwrote.
|
679 |
+
"""
|
680 |
+
|
681 |
+
|
682 |
+
def register_converter_cb(key) -> None:
|
683 |
+
from pandas.plotting import (
|
684 |
+
deregister_matplotlib_converters,
|
685 |
+
register_matplotlib_converters,
|
686 |
+
)
|
687 |
+
|
688 |
+
if cf.get_option(key):
|
689 |
+
register_matplotlib_converters()
|
690 |
+
else:
|
691 |
+
deregister_matplotlib_converters()
|
692 |
+
|
693 |
+
|
694 |
+
with cf.config_prefix("plotting.matplotlib"):
|
695 |
+
cf.register_option(
|
696 |
+
"register_converters",
|
697 |
+
"auto",
|
698 |
+
register_converter_doc,
|
699 |
+
validator=is_one_of_factory(["auto", True, False]),
|
700 |
+
cb=register_converter_cb,
|
701 |
+
)
|
702 |
+
|
703 |
+
# ------
|
704 |
+
# Styler
|
705 |
+
# ------
|
706 |
+
|
707 |
+
styler_sparse_index_doc = """
|
708 |
+
: bool
|
709 |
+
Whether to sparsify the display of a hierarchical index. Setting to False will
|
710 |
+
display each explicit level element in a hierarchical key for each row.
|
711 |
+
"""
|
712 |
+
|
713 |
+
styler_sparse_columns_doc = """
|
714 |
+
: bool
|
715 |
+
Whether to sparsify the display of hierarchical columns. Setting to False will
|
716 |
+
display each explicit level element in a hierarchical key for each column.
|
717 |
+
"""
|
718 |
+
|
719 |
+
styler_render_repr = """
|
720 |
+
: str
|
721 |
+
Determine which output to use in Jupyter Notebook in {"html", "latex"}.
|
722 |
+
"""
|
723 |
+
|
724 |
+
styler_max_elements = """
|
725 |
+
: int
|
726 |
+
The maximum number of data-cell (<td>) elements that will be rendered before
|
727 |
+
trimming will occur over columns, rows or both if needed.
|
728 |
+
"""
|
729 |
+
|
730 |
+
styler_max_rows = """
|
731 |
+
: int, optional
|
732 |
+
The maximum number of rows that will be rendered. May still be reduced to
|
733 |
+
satisfy ``max_elements``, which takes precedence.
|
734 |
+
"""
|
735 |
+
|
736 |
+
styler_max_columns = """
|
737 |
+
: int, optional
|
738 |
+
The maximum number of columns that will be rendered. May still be reduced to
|
739 |
+
satisfy ``max_elements``, which takes precedence.
|
740 |
+
"""
|
741 |
+
|
742 |
+
styler_precision = """
|
743 |
+
: int
|
744 |
+
The precision for floats and complex numbers.
|
745 |
+
"""
|
746 |
+
|
747 |
+
styler_decimal = """
|
748 |
+
: str
|
749 |
+
The character representation for the decimal separator for floats and complex.
|
750 |
+
"""
|
751 |
+
|
752 |
+
styler_thousands = """
|
753 |
+
: str, optional
|
754 |
+
The character representation for thousands separator for floats, int and complex.
|
755 |
+
"""
|
756 |
+
|
757 |
+
styler_na_rep = """
|
758 |
+
: str, optional
|
759 |
+
The string representation for values identified as missing.
|
760 |
+
"""
|
761 |
+
|
762 |
+
styler_escape = """
|
763 |
+
: str, optional
|
764 |
+
Whether to escape certain characters according to the given context; html or latex.
|
765 |
+
"""
|
766 |
+
|
767 |
+
styler_formatter = """
|
768 |
+
: str, callable, dict, optional
|
769 |
+
A formatter object to be used as default within ``Styler.format``.
|
770 |
+
"""
|
771 |
+
|
772 |
+
styler_multirow_align = """
|
773 |
+
: {"c", "t", "b"}
|
774 |
+
The specifier for vertical alignment of sparsified LaTeX multirows.
|
775 |
+
"""
|
776 |
+
|
777 |
+
styler_multicol_align = r"""
|
778 |
+
: {"r", "c", "l", "naive-l", "naive-r"}
|
779 |
+
The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe
|
780 |
+
decorators can also be added to non-naive values to draw vertical
|
781 |
+
rules, e.g. "\|r" will draw a rule on the left side of right aligned merged cells.
|
782 |
+
"""
|
783 |
+
|
784 |
+
styler_hrules = """
|
785 |
+
: bool
|
786 |
+
Whether to add horizontal rules on top and bottom and below the headers.
|
787 |
+
"""
|
788 |
+
|
789 |
+
styler_environment = """
|
790 |
+
: str
|
791 |
+
The environment to replace ``\\begin{table}``. If "longtable" is used results
|
792 |
+
in a specific longtable environment format.
|
793 |
+
"""
|
794 |
+
|
795 |
+
styler_encoding = """
|
796 |
+
: str
|
797 |
+
The encoding used for output HTML and LaTeX files.
|
798 |
+
"""
|
799 |
+
|
800 |
+
styler_mathjax = """
|
801 |
+
: bool
|
802 |
+
If False will render special CSS classes to table attributes that indicate Mathjax
|
803 |
+
will not be used in Jupyter Notebook.
|
804 |
+
"""
|
805 |
+
|
806 |
+
with cf.config_prefix("styler"):
|
807 |
+
cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool)
|
808 |
+
|
809 |
+
cf.register_option(
|
810 |
+
"sparse.columns", True, styler_sparse_columns_doc, validator=is_bool
|
811 |
+
)
|
812 |
+
|
813 |
+
cf.register_option(
|
814 |
+
"render.repr",
|
815 |
+
"html",
|
816 |
+
styler_render_repr,
|
817 |
+
validator=is_one_of_factory(["html", "latex"]),
|
818 |
+
)
|
819 |
+
|
820 |
+
cf.register_option(
|
821 |
+
"render.max_elements",
|
822 |
+
2**18,
|
823 |
+
styler_max_elements,
|
824 |
+
validator=is_nonnegative_int,
|
825 |
+
)
|
826 |
+
|
827 |
+
cf.register_option(
|
828 |
+
"render.max_rows",
|
829 |
+
None,
|
830 |
+
styler_max_rows,
|
831 |
+
validator=is_nonnegative_int,
|
832 |
+
)
|
833 |
+
|
834 |
+
cf.register_option(
|
835 |
+
"render.max_columns",
|
836 |
+
None,
|
837 |
+
styler_max_columns,
|
838 |
+
validator=is_nonnegative_int,
|
839 |
+
)
|
840 |
+
|
841 |
+
cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str)
|
842 |
+
|
843 |
+
cf.register_option("format.decimal", ".", styler_decimal, validator=is_str)
|
844 |
+
|
845 |
+
cf.register_option(
|
846 |
+
"format.precision", 6, styler_precision, validator=is_nonnegative_int
|
847 |
+
)
|
848 |
+
|
849 |
+
cf.register_option(
|
850 |
+
"format.thousands",
|
851 |
+
None,
|
852 |
+
styler_thousands,
|
853 |
+
validator=is_instance_factory([type(None), str]),
|
854 |
+
)
|
855 |
+
|
856 |
+
cf.register_option(
|
857 |
+
"format.na_rep",
|
858 |
+
None,
|
859 |
+
styler_na_rep,
|
860 |
+
validator=is_instance_factory([type(None), str]),
|
861 |
+
)
|
862 |
+
|
863 |
+
cf.register_option(
|
864 |
+
"format.escape",
|
865 |
+
None,
|
866 |
+
styler_escape,
|
867 |
+
validator=is_one_of_factory([None, "html", "latex", "latex-math"]),
|
868 |
+
)
|
869 |
+
|
870 |
+
cf.register_option(
|
871 |
+
"format.formatter",
|
872 |
+
None,
|
873 |
+
styler_formatter,
|
874 |
+
validator=is_instance_factory([type(None), dict, Callable, str]),
|
875 |
+
)
|
876 |
+
|
877 |
+
cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool)
|
878 |
+
|
879 |
+
cf.register_option(
|
880 |
+
"latex.multirow_align",
|
881 |
+
"c",
|
882 |
+
styler_multirow_align,
|
883 |
+
validator=is_one_of_factory(["c", "t", "b", "naive"]),
|
884 |
+
)
|
885 |
+
|
886 |
+
val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"]
|
887 |
+
val_mca += ["naive-l", "naive-r"]
|
888 |
+
cf.register_option(
|
889 |
+
"latex.multicol_align",
|
890 |
+
"r",
|
891 |
+
styler_multicol_align,
|
892 |
+
validator=is_one_of_factory(val_mca),
|
893 |
+
)
|
894 |
+
|
895 |
+
cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool)
|
896 |
+
|
897 |
+
cf.register_option(
|
898 |
+
"latex.environment",
|
899 |
+
None,
|
900 |
+
styler_environment,
|
901 |
+
validator=is_instance_factory([type(None), str]),
|
902 |
+
)
|
903 |
+
|
904 |
+
|
905 |
+
with cf.config_prefix("future"):
|
906 |
+
cf.register_option(
|
907 |
+
"infer_string",
|
908 |
+
False,
|
909 |
+
"Whether to infer sequence of str objects as pyarrow string "
|
910 |
+
"dtype, which will be the default in pandas 3.0 "
|
911 |
+
"(at which point this option will be deprecated).",
|
912 |
+
validator=is_one_of_factory([True, False]),
|
913 |
+
)
|
914 |
+
|
915 |
+
cf.register_option(
|
916 |
+
"no_silent_downcasting",
|
917 |
+
False,
|
918 |
+
"Whether to opt-in to the future behavior which will *not* silently "
|
919 |
+
"downcast results from Series and DataFrame `where`, `mask`, and `clip` "
|
920 |
+
"methods. "
|
921 |
+
"Silent downcasting will be removed in pandas 3.0 "
|
922 |
+
"(at which point this option will be deprecated).",
|
923 |
+
validator=is_one_of_factory([True, False]),
|
924 |
+
)
|
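The registrations above are only reachable through pandas' public options API. As a minimal, hedged sketch (not part of the vendored file; it uses only documented pandas entry points), this is how the option names defined above behave at runtime:

# Illustrative sketch, not part of config_init.py: exercising a few of the
# options registered above through the public pandas API.
import pandas as pd

print(pd.get_option("io.parquet.engine"))        # "auto", the default above
print(pd.get_option("styler.format.precision"))  # 6

# Temporarily override an option for a block of code:
with pd.option_context("mode.chained_assignment", "raise"):
    pass  # chained assignment would now raise instead of warning

# describe_option prints the doc string passed to cf.register_option:
pd.describe_option("mode.string_storage")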
llmeval-env/lib/python3.10/site-packages/pandas/core/construction.py
ADDED
@@ -0,0 +1,824 @@
"""
Constructor functions intended to be shared by pd.array, Series.__init__,
and Index.__new__.

These should not depend on core.internals.
"""
from __future__ import annotations

from collections.abc import Sequence
from typing import (
    TYPE_CHECKING,
    Optional,
    Union,
    cast,
    overload,
)
import warnings

import numpy as np
from numpy import ma

from pandas._config import using_pyarrow_string_dtype

from pandas._libs import lib
from pandas._libs.tslibs import (
    Period,
    get_supported_dtype,
    is_supported_dtype,
)
from pandas._typing import (
    AnyArrayLike,
    ArrayLike,
    Dtype,
    DtypeObj,
    T,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
    construct_1d_arraylike_from_scalar,
    construct_1d_object_array_from_listlike,
    maybe_cast_to_datetime,
    maybe_cast_to_integer_array,
    maybe_convert_platform,
    maybe_infer_to_datetimelike,
    maybe_promote,
)
from pandas.core.dtypes.common import (
    is_list_like,
    is_object_dtype,
    is_string_dtype,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import NumpyEADtype
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCExtensionArray,
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.missing import isna

import pandas.core.common as com

if TYPE_CHECKING:
    from pandas import (
        Index,
        Series,
    )
    from pandas.core.arrays.base import ExtensionArray


def array(
    data: Sequence[object] | AnyArrayLike,
    dtype: Dtype | None = None,
    copy: bool = True,
) -> ExtensionArray:
    """
    Create an array.

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =======================================
        Scalar Type                    Array Type
        ============================== =======================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        :class:`int`                   :class:`pandas.arrays.IntegerArray`
        :class:`float`                 :class:`pandas.arrays.FloatingArray`
        :class:`str`                   :class:`pandas.arrays.StringArray` or
                                       :class:`pandas.arrays.ArrowStringArray`
        :class:`bool`                  :class:`pandas.arrays.BooleanArray`
        ============================== =======================================

        The ExtensionArray created when the scalar type is :class:`str` is determined by
        ``pd.options.mode.string_storage`` if the dtype is not explicitly given.

        For all other cases, NumPy's usual inference rules will be used.
    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.NumpyExtensionArray` backed by a
    NumPy array.

    >>> pd.array(['a', 'b'], dtype=str)
    <NumpyExtensionArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <NumpyExtensionArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
    rather than a ``NumpyExtensionArray``. This is for symmetry with the case of
    timezone-aware data, which NumPy does not natively support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1h", "2h"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['0 days 01:00:00', '0 days 02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, pandas will infer the best dtype from the values.
    See the description of `dtype` for the types pandas infers for.

    >>> pd.array([1, 2])
    <IntegerArray>
    [1, 2]
    Length: 2, dtype: Int64

    >>> pd.array([1, 2, np.nan])
    <IntegerArray>
    [1, 2, <NA>]
    Length: 3, dtype: Int64

    >>> pd.array([1.1, 2.2])
    <FloatingArray>
    [1.1, 2.2]
    Length: 2, dtype: Float64

    >>> pd.array(["a", None, "c"])
    <StringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> with pd.option_context("string_storage", "pyarrow"):
    ...     arr = pd.array(["a", None, "c"])
    ...
    >>> arr
    <ArrowStringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    ['a', 'b', 'a']
    Categories (2, object): ['a', 'b']

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    ['a', 'b', 'a']
    Categories (3, object): ['a' < 'b' < 'c']

    If pandas does not infer a dedicated extension type a
    :class:`arrays.NumpyExtensionArray` is returned.

    >>> pd.array([1 + 1j, 3 + 2j])
    <NumpyExtensionArray>
    [(1+1j), (3+2j)]
    Length: 2, dtype: complex128

    As mentioned in the "Notes" section, new extension types may be added
    in the future (by pandas or 3rd party libraries), causing the return
    value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the
    `dtype` as a NumPy dtype if you need to ensure there's no future change in
    behavior.

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <NumpyExtensionArray>
    [1, 2]
    Length: 2, dtype: int32

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        BooleanArray,
        DatetimeArray,
        ExtensionArray,
        FloatingArray,
        IntegerArray,
        IntervalArray,
        NumpyExtensionArray,
        PeriodArray,
        TimedeltaArray,
    )
    from pandas.core.arrays.string_ import StringDtype

    if lib.is_scalar(data):
        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
        raise ValueError(msg)
    elif isinstance(data, ABCDataFrame):
        raise TypeError("Cannot pass DataFrame to 'pandas.array'")

    if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
        # Note: we exclude np.ndarray here, will do type inference on it
        dtype = data.dtype

    data = extract_array(data, extract_numpy=True)

    # this returns None for not-found dtypes.
    if dtype is not None:
        dtype = pandas_dtype(dtype)

    if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype):
        # e.g. TimedeltaArray[s], avoid casting to NumpyExtensionArray
        if copy:
            return data.copy()
        return data

    if isinstance(dtype, ExtensionDtype):
        cls = dtype.construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=True)
        if inferred_dtype == "period":
            period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data)
            return PeriodArray._from_sequence(period_data, copy=copy)

        elif inferred_dtype == "interval":
            return IntervalArray(data, copy=copy)

        elif inferred_dtype.startswith("datetime"):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to NumpyExtensionArray
                pass

        elif inferred_dtype.startswith("timedelta"):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "string":
            # StringArray/ArrowStringArray depending on pd.options.mode.string_storage
            dtype = StringDtype()
            cls = dtype.construct_array_type()
            return cls._from_sequence(data, dtype=dtype, copy=copy)

        elif inferred_dtype == "integer":
            return IntegerArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data):
            return FloatingArray._from_sequence(data, copy=copy)
        elif (
            inferred_dtype in ("floating", "mixed-integer-float")
            and getattr(data, "dtype", None) != np.float16
        ):
            # GH#44715 Exclude np.float16 bc FloatingArray does not support it;
            # we will fall back to NumpyExtensionArray.
            return FloatingArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "boolean":
            return BooleanArray._from_sequence(data, dtype="boolean", copy=copy)

    # Pandas overrides NumPy for
    # 1. datetime64[ns,us,ms,s]
    # 2. timedelta64[ns,us,ms,s]
    # so that a DatetimeArray is returned.
    if lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    if lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

    elif lib.is_np_dtype(dtype, "mM"):
        warnings.warn(
            r"datetime64 and timedelta64 dtype resolutions other than "
            r"'s', 'ms', 'us', and 'ns' are deprecated. "
            r"In future releases passing unsupported resolutions will "
            r"raise an exception.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy)


_typs = frozenset(
    {
        "index",
        "rangeindex",
        "multiindex",
        "datetimeindex",
        "timedeltaindex",
        "periodindex",
        "categoricalindex",
        "intervalindex",
        "series",
    }
)


@overload
def extract_array(
    obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ...
) -> ArrayLike:
    ...


@overload
def extract_array(
    obj: T, extract_numpy: bool = ..., extract_range: bool = ...
) -> T | ArrayLike:
    ...


def extract_array(
    obj: T, extract_numpy: bool = False, extract_range: bool = False
) -> T | ArrayLike:
    """
    Extract the ndarray or ExtensionArray from a Series or Index.

    For all other types, `obj` is just returned as is.

    Parameters
    ----------
    obj : object
        For Series / Index, the underlying ExtensionArray is unboxed.

    extract_numpy : bool, default False
        Whether to extract the ndarray from a NumpyExtensionArray.

    extract_range : bool, default False
        If we have a RangeIndex, return range._values if True
        (which is a materialized integer ndarray), otherwise return unchanged.

    Returns
    -------
    arr : object

    Examples
    --------
    >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
    ['a', 'b', 'c']
    Categories (3, object): ['a', 'b', 'c']

    Other objects like lists, arrays, and DataFrames are just passed through.

    >>> extract_array([1, 2, 3])
    [1, 2, 3]

    For an ndarray-backed Series / Index the ndarray is returned.

    >>> extract_array(pd.Series([1, 2, 3]))
    array([1, 2, 3])

    To extract all the way down to the ndarray, pass ``extract_numpy=True``.

    >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
    array([1, 2, 3])
    """
    typ = getattr(obj, "_typ", None)
    if typ in _typs:
        # i.e. isinstance(obj, (ABCIndex, ABCSeries))
        if typ == "rangeindex":
            if extract_range:
                # error: "T" has no attribute "_values"
                return obj._values  # type: ignore[attr-defined]
            return obj

        # error: "T" has no attribute "_values"
        return obj._values  # type: ignore[attr-defined]

    elif extract_numpy and typ == "npy_extension":
        # i.e. isinstance(obj, ABCNumpyExtensionArray)
        # error: "T" has no attribute "to_numpy"
        return obj.to_numpy()  # type: ignore[attr-defined]

    return obj


def ensure_wrapped_if_datetimelike(arr):
    """
    Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray.
    """
    if isinstance(arr, np.ndarray):
        if arr.dtype.kind == "M":
            from pandas.core.arrays import DatetimeArray

            dtype = get_supported_dtype(arr.dtype)
            return DatetimeArray._from_sequence(arr, dtype=dtype)

        elif arr.dtype.kind == "m":
            from pandas.core.arrays import TimedeltaArray

            dtype = get_supported_dtype(arr.dtype)
            return TimedeltaArray._from_sequence(arr, dtype=dtype)

    return arr


def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray:
    """
    Convert numpy MaskedArray to ensure mask is softened.
    """
    mask = ma.getmaskarray(data)
    if mask.any():
        dtype, fill_value = maybe_promote(data.dtype, np.nan)
        dtype = cast(np.dtype, dtype)
        data = ma.asarray(data.astype(dtype, copy=True))
        data.soften_mask()  # set hardmask False if it was True
        data[mask] = fill_value
    else:
        data = data.copy()
    return data


def sanitize_array(
    data,
    index: Index | None,
    dtype: DtypeObj | None = None,
    copy: bool = False,
    *,
    allow_2d: bool = False,
) -> ArrayLike:
    """
    Sanitize input data to an ndarray or ExtensionArray, copy if specified,
    coerce to the dtype if specified.

    Parameters
    ----------
    data : Any
    index : Index or None, default None
    dtype : np.dtype, ExtensionDtype, or None, default None
    copy : bool, default False
    allow_2d : bool, default False
        If False, raise if we have a 2D Arraylike.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    original_dtype = dtype
    if isinstance(data, ma.MaskedArray):
        data = sanitize_masked_array(data)

    if isinstance(dtype, NumpyEADtype):
        # Avoid ending up with a NumpyExtensionArray
        dtype = dtype.numpy_dtype

    object_index = False
    if isinstance(data, ABCIndex) and data.dtype == object and dtype is None:
        object_index = True

    # extract ndarray or ExtensionArray, ensure we have no NumpyExtensionArray
    data = extract_array(data, extract_numpy=True, extract_range=True)

    if isinstance(data, np.ndarray) and data.ndim == 0:
        if dtype is None:
            dtype = data.dtype
        data = lib.item_from_zerodim(data)
    elif isinstance(data, range):
        # GH#16804
        data = range_to_ndarray(data)
        copy = False

    if not is_list_like(data):
        if index is None:
            raise ValueError("index must be specified when data is not list-like")
        if (
            isinstance(data, str)
            and using_pyarrow_string_dtype()
            and original_dtype is None
        ):
            from pandas.core.arrays.string_ import StringDtype

            dtype = StringDtype("pyarrow_numpy")
        data = construct_1d_arraylike_from_scalar(data, len(index), dtype)

        return data

    elif isinstance(data, ABCExtensionArray):
        # it is already ensured above this is not a NumpyExtensionArray
        # Until GH#49309 is fixed this check needs to come before the
        # ExtensionDtype check
        if dtype is not None:
            subarr = data.astype(dtype, copy=copy)
        elif copy:
            subarr = data.copy()
        else:
            subarr = data

    elif isinstance(dtype, ExtensionDtype):
        # create an extension array from its dtype
        _sanitize_non_ordered(data)
        cls = dtype.construct_array_type()
        subarr = cls._from_sequence(data, dtype=dtype, copy=copy)

    # GH#846
    elif isinstance(data, np.ndarray):
        if isinstance(data, np.matrix):
            data = data.A

        if dtype is None:
            subarr = data
            if data.dtype == object:
                subarr = maybe_infer_to_datetimelike(data)
                if (
                    object_index
                    and using_pyarrow_string_dtype()
                    and is_string_dtype(subarr)
                ):
                    # Avoid inference when string option is set
                    subarr = data
            elif data.dtype.kind == "U" and using_pyarrow_string_dtype():
                from pandas.core.arrays.string_ import StringDtype

                dtype = StringDtype(storage="pyarrow_numpy")
                subarr = dtype.construct_array_type()._from_sequence(data, dtype=dtype)

            if subarr is data and copy:
                subarr = subarr.copy()

        else:
            # we will try to copy by-definition here
            subarr = _try_cast(data, dtype, copy)

    elif hasattr(data, "__array__"):
        # e.g. dask array GH#38645
        if not copy:
            data = np.asarray(data)
        else:
            data = np.array(data, copy=copy)
        return sanitize_array(
            data,
            index=index,
            dtype=dtype,
            copy=False,
            allow_2d=allow_2d,
        )

    else:
        _sanitize_non_ordered(data)
        # materialize e.g. generators, convert e.g. tuples, abc.ValueView
        data = list(data)

        if len(data) == 0 and dtype is None:
            # We default to float64, matching numpy
            subarr = np.array([], dtype=np.float64)

        elif dtype is not None:
            subarr = _try_cast(data, dtype, copy)

        else:
            subarr = maybe_convert_platform(data)
            if subarr.dtype == object:
                subarr = cast(np.ndarray, subarr)
                subarr = maybe_infer_to_datetimelike(subarr)

    subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d)

    if isinstance(subarr, np.ndarray):
        # at this point we should have dtype be None or subarr.dtype == dtype
        dtype = cast(np.dtype, dtype)
        subarr = _sanitize_str_dtypes(subarr, data, dtype, copy)

    return subarr


def range_to_ndarray(rng: range) -> np.ndarray:
    """
    Cast a range object to ndarray.
    """
    # GH#30171 perf avoid realizing range as a list in np.array
    try:
        arr = np.arange(rng.start, rng.stop, rng.step, dtype="int64")
    except OverflowError:
        # GH#30173 handling for ranges that overflow int64
        if (rng.start >= 0 and rng.step > 0) or (rng.step < 0 <= rng.stop):
            try:
                arr = np.arange(rng.start, rng.stop, rng.step, dtype="uint64")
            except OverflowError:
                arr = construct_1d_object_array_from_listlike(list(rng))
        else:
            arr = construct_1d_object_array_from_listlike(list(rng))
    return arr


def _sanitize_non_ordered(data) -> None:
    """
    Raise only for unordered sets, e.g., not for dict_keys
    """
    if isinstance(data, (set, frozenset)):
        raise TypeError(f"'{type(data).__name__}' type is unordered")


def _sanitize_ndim(
    result: ArrayLike,
    data,
    dtype: DtypeObj | None,
    index: Index | None,
    *,
    allow_2d: bool = False,
) -> ArrayLike:
    """
    Ensure we have a 1-dimensional result array.
    """
    if getattr(result, "ndim", 0) == 0:
        raise ValueError("result should be arraylike with ndim > 0")

    if result.ndim == 1:
        # the result that we want
        result = _maybe_repeat(result, index)

    elif result.ndim > 1:
        if isinstance(data, np.ndarray):
            if allow_2d:
                return result
            raise ValueError(
                f"Data must be 1-dimensional, got ndarray of shape {data.shape} instead"
            )
        if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype):
            # i.e. NumpyEADtype("O")

            result = com.asarray_tuplesafe(data, dtype=np.dtype("object"))
            cls = dtype.construct_array_type()
            result = cls._from_sequence(result, dtype=dtype)
        else:
            # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type
            # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[str,
            # dtype[Any], None]"
            result = com.asarray_tuplesafe(data, dtype=dtype)  # type: ignore[arg-type]
    return result


def _sanitize_str_dtypes(
    result: np.ndarray, data, dtype: np.dtype | None, copy: bool
) -> np.ndarray:
    """
    Ensure we have a dtype that is supported by pandas.
    """

    # This is to prevent mixed-type Series getting all casted to
    # NumPy string type, e.g. NaN --> '-1#IND'.
    if issubclass(result.dtype.type, str):
        # GH#16605
        # If not empty convert the data to dtype
        # GH#19853: If data is a scalar, result has already the result
        if not lib.is_scalar(data):
            if not np.all(isna(data)):
                data = np.asarray(data, dtype=dtype)
            if not copy:
                result = np.asarray(data, dtype=object)
            else:
                result = np.array(data, dtype=object, copy=copy)
    return result


def _maybe_repeat(arr: ArrayLike, index: Index | None) -> ArrayLike:
    """
    If we have a length-1 array and an index describing how long we expect
    the result to be, repeat the array.
    """
    if index is not None:
        if 1 == len(arr) != len(index):
            arr = arr.repeat(len(index))
    return arr


def _try_cast(
    arr: list | np.ndarray,
    dtype: np.dtype,
    copy: bool,
) -> ArrayLike:
    """
    Convert input to numpy ndarray and optionally cast to a given dtype.

    Parameters
    ----------
    arr : ndarray or list
        Excludes: ExtensionArray, Series, Index.
    dtype : np.dtype
    copy : bool
        If False, don't copy the data if not needed.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    is_ndarray = isinstance(arr, np.ndarray)

    if dtype == object:
        if not is_ndarray:
            subarr = construct_1d_object_array_from_listlike(arr)
            return subarr
        return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy)

    elif dtype.kind == "U":
        # TODO: test cases with arr.dtype.kind in "mM"
        if is_ndarray:
            arr = cast(np.ndarray, arr)
            shape = arr.shape
            if arr.ndim > 1:
                arr = arr.ravel()
        else:
            shape = (len(arr),)
        return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(
            shape
        )

    elif dtype.kind in "mM":
        return maybe_cast_to_datetime(arr, dtype)

    # GH#15832: Check if we are requesting a numeric dtype and
    # that we can convert the data to the requested dtype.
    elif dtype.kind in "iu":
        # this will raise if we have e.g. floats

        subarr = maybe_cast_to_integer_array(arr, dtype)
    elif not copy:
        subarr = np.asarray(arr, dtype=dtype)
    else:
        subarr = np.array(arr, dtype=dtype, copy=copy)

    return subarr
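Before the next file entry, a small hedged sketch of how the `array` and `extract_array` functions above behave; the dtypes shown follow the inference table in the docstring, and only public pandas entry points plus the module shown above are used:

# Illustrative sketch, not part of construction.py: pd.array is the public
# face of the array() function above; inference follows lib.infer_dtype.
import numpy as np
import pandas as pd
from pandas.core.construction import extract_array

print(pd.array([1, 2, None]).dtype)   # Int64 (IntegerArray)
print(pd.array([1.5, 2.5]).dtype)     # Float64 (FloatingArray)
print(pd.array([1, 2], dtype=np.dtype("int32")))  # NumpyExtensionArray

# extract_array unboxes the array backing a Series/Index:
print(extract_array(pd.Series([1, 2, 3]), extract_numpy=True))  # ndarray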
llmeval-env/lib/python3.10/site-packages/pandas/core/flags.py
ADDED
@@ -0,0 +1,117 @@
from __future__ import annotations

from typing import TYPE_CHECKING
import weakref

if TYPE_CHECKING:
    from pandas.core.generic import NDFrame


class Flags:
    """
    Flags that apply to pandas objects.

    Parameters
    ----------
    obj : Series or DataFrame
        The object these flags are associated with.
    allows_duplicate_labels : bool, default True
        Whether to allow duplicate labels in this object. By default,
        duplicate labels are permitted. Setting this to ``False`` will
        cause an :class:`errors.DuplicateLabelError` to be raised when
        `index` (or columns for DataFrame) is not unique, or any
        subsequent operation on introduces duplicates.
        See :ref:`duplicates.disallow` for more.

        .. warning::

           This is an experimental feature. Currently, many methods fail to
           propagate the ``allows_duplicate_labels`` value. In future versions
           it is expected that every method taking or returning one or more
           DataFrame or Series objects will propagate ``allows_duplicate_labels``.

    Examples
    --------
    Attributes can be set in two ways:

    >>> df = pd.DataFrame()
    >>> df.flags
    <Flags(allows_duplicate_labels=True)>
    >>> df.flags.allows_duplicate_labels = False
    >>> df.flags
    <Flags(allows_duplicate_labels=False)>

    >>> df.flags['allows_duplicate_labels'] = True
    >>> df.flags
    <Flags(allows_duplicate_labels=True)>
    """

    _keys: set[str] = {"allows_duplicate_labels"}

    def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None:
        self._allows_duplicate_labels = allows_duplicate_labels
        self._obj = weakref.ref(obj)

    @property
    def allows_duplicate_labels(self) -> bool:
        """
        Whether this object allows duplicate labels.

        Setting ``allows_duplicate_labels=False`` ensures that the
        index (and columns of a DataFrame) are unique. Most methods
        that accept and return a Series or DataFrame will propagate
        the value of ``allows_duplicate_labels``.

        See :ref:`duplicates` for more.

        See Also
        --------
        DataFrame.attrs : Set global metadata on this object.
        DataFrame.set_flags : Set global flags on this object.

        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
        >>> df.flags.allows_duplicate_labels
        True
        >>> df.flags.allows_duplicate_labels = False
        Traceback (most recent call last):
            ...
        pandas.errors.DuplicateLabelError: Index has duplicates.
              positions
        label
        a        [0, 1]
        """
        return self._allows_duplicate_labels

    @allows_duplicate_labels.setter
    def allows_duplicate_labels(self, value: bool) -> None:
        value = bool(value)
        obj = self._obj()
        if obj is None:
            raise ValueError("This flag's object has been deleted.")

        if not value:
            for ax in obj.axes:
                ax._maybe_check_unique()

        self._allows_duplicate_labels = value

    def __getitem__(self, key: str):
        if key not in self._keys:
            raise KeyError(key)

        return getattr(self, key)

    def __setitem__(self, key: str, value) -> None:
        if key not in self._keys:
            raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
        setattr(self, key, value)

    def __repr__(self) -> str:
        return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"

    def __eq__(self, other) -> bool:
        if isinstance(other, type(self)):
            return self.allows_duplicate_labels == other.allows_duplicate_labels
        return False
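A short hedged usage sketch for the Flags class above (not part of the vendored file); DataFrame.flags and DataFrame.set_flags are the public entry points, so the class is rarely constructed directly:

# Illustrative sketch, not part of flags.py.
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
print(df.flags.allows_duplicate_labels)  # True by default

df2 = df.set_flags(allows_duplicate_labels=False)  # returns a new object
print(df2.flags["allows_duplicate_labels"])        # False, via __getitem__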
llmeval-env/lib/python3.10/site-packages/pandas/core/frame.py
ADDED
The diff for this file is too large to render. See raw diff

llmeval-env/lib/python3.10/site-packages/pandas/core/generic.py
ADDED
The diff for this file is too large to render. See raw diff

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__init__.py
ADDED
File without changes

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (192 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc
ADDED
Binary file (17.3 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc
ADDED
Binary file (10.6 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc
ADDED
Binary file (196 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/category.cpython-310.pyc
ADDED
Binary file (14.9 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc
ADDED
Binary file (21.5 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc
ADDED
Binary file (32.7 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/extension.cpython-310.pyc
ADDED
Binary file (5.2 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/frozen.cpython-310.pyc
ADDED
Binary file (4.11 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/interval.cpython-310.pyc
ADDED
Binary file (28.9 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc
ADDED
Binary file (108 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/period.cpython-310.pyc
ADDED
Binary file (16.4 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/range.cpython-310.pyc
ADDED
Binary file (29.3 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc
ADDED
Binary file (10.1 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/accessors.py
ADDED
@@ -0,0 +1,643 @@
"""
datetimelike delegation
"""
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    cast,
)
import warnings

import numpy as np

from pandas._libs import lib
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.common import (
    is_integer_dtype,
    is_list_like,
)
from pandas.core.dtypes.dtypes import (
    ArrowDtype,
    CategoricalDtype,
    DatetimeTZDtype,
    PeriodDtype,
)
from pandas.core.dtypes.generic import ABCSeries

from pandas.core.accessor import (
    PandasDelegate,
    delegate_names,
)
from pandas.core.arrays import (
    DatetimeArray,
    PeriodArray,
    TimedeltaArray,
)
from pandas.core.arrays.arrow.array import ArrowExtensionArray
from pandas.core.base import (
    NoNewAttributesMixin,
    PandasObject,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex

if TYPE_CHECKING:
    from pandas import (
        DataFrame,
        Series,
    )


class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
    _hidden_attrs = PandasObject._hidden_attrs | {
        "orig",
        "name",
    }

    def __init__(self, data: Series, orig) -> None:
        if not isinstance(data, ABCSeries):
            raise TypeError(
                f"cannot convert an object of type {type(data)} to a datetimelike index"
            )

        self._parent = data
        self.orig = orig
        self.name = getattr(data, "name", None)
        self._freeze()

    def _get_values(self):
        data = self._parent
        if lib.is_np_dtype(data.dtype, "M"):
            return DatetimeIndex(data, copy=False, name=self.name)

        elif isinstance(data.dtype, DatetimeTZDtype):
            return DatetimeIndex(data, copy=False, name=self.name)

        elif lib.is_np_dtype(data.dtype, "m"):
            return TimedeltaIndex(data, copy=False, name=self.name)

        elif isinstance(data.dtype, PeriodDtype):
            return PeriodArray(data, copy=False)

        raise TypeError(
            f"cannot convert an object of type {type(data)} to a datetimelike index"
        )

    def _delegate_property_get(self, name: str):
        from pandas import Series

        values = self._get_values()

        result = getattr(values, name)

        # maybe need to upcast (ints)
        if isinstance(result, np.ndarray):
            if is_integer_dtype(result):
                result = result.astype("int64")
        elif not is_list_like(result):
            return result

        result = np.asarray(result)

        if self.orig is not None:
            index = self.orig.index
        else:
            index = self._parent.index
        # return the result as a Series
        result = Series(result, index=index, name=self.name).__finalize__(self._parent)

        # setting this object will show a SettingWithCopyWarning/Error
        result._is_copy = (
            "modifications to a property of a datetimelike "
            "object are not supported and are discarded. "
            "Change values on the original."
        )

        return result

    def _delegate_property_set(self, name: str, value, *args, **kwargs):
        raise ValueError(
            "modifications to a property of a datetimelike object are not supported. "
            "Change values on the original."
        )

    def _delegate_method(self, name: str, *args, **kwargs):
        from pandas import Series

        values = self._get_values()

        method = getattr(values, name)
        result = method(*args, **kwargs)

        if not is_list_like(result):
            return result

        result = Series(result, index=self._parent.index, name=self.name).__finalize__(
            self._parent
        )

        # setting this object will show a SettingWithCopyWarning/Error
        result._is_copy = (
            "modifications to a method of a datetimelike "
            "object are not supported and are discarded. "
            "Change values on the original."
        )

        return result


@delegate_names(
    delegate=ArrowExtensionArray,
    accessors=TimedeltaArray._datetimelike_ops,
    typ="property",
    accessor_mapping=lambda x: f"_dt_{x}",
    raise_on_missing=False,
)
@delegate_names(
    delegate=ArrowExtensionArray,
    accessors=TimedeltaArray._datetimelike_methods,
    typ="method",
    accessor_mapping=lambda x: f"_dt_{x}",
    raise_on_missing=False,
)
@delegate_names(
    delegate=ArrowExtensionArray,
    accessors=DatetimeArray._datetimelike_ops,
    typ="property",
    accessor_mapping=lambda x: f"_dt_{x}",
    raise_on_missing=False,
)
@delegate_names(
    delegate=ArrowExtensionArray,
    accessors=DatetimeArray._datetimelike_methods,
    typ="method",
    accessor_mapping=lambda x: f"_dt_{x}",
    raise_on_missing=False,
)
class ArrowTemporalProperties(PandasDelegate, PandasObject, NoNewAttributesMixin):
    def __init__(self, data: Series, orig) -> None:
        if not isinstance(data, ABCSeries):
            raise TypeError(
                f"cannot convert an object of type {type(data)} to a datetimelike index"
            )

        self._parent = data
        self._orig = orig
        self._freeze()

    def _delegate_property_get(self, name: str):
        if not hasattr(self._parent.array, f"_dt_{name}"):
            raise NotImplementedError(
                f"dt.{name} is not supported for {self._parent.dtype}"
            )
        result = getattr(self._parent.array, f"_dt_{name}")

        if not is_list_like(result):
            return result

        if self._orig is not None:
            index = self._orig.index
        else:
            index = self._parent.index
        # return the result as a Series, which is by definition a copy
        result = type(self._parent)(
            result, index=index, name=self._parent.name
        ).__finalize__(self._parent)

        return result

    def _delegate_method(self, name: str, *args, **kwargs):
        if not hasattr(self._parent.array, f"_dt_{name}"):
            raise NotImplementedError(
                f"dt.{name} is not supported for {self._parent.dtype}"
            )

        result = getattr(self._parent.array, f"_dt_{name}")(*args, **kwargs)

        if self._orig is not None:
            index = self._orig.index
        else:
            index = self._parent.index
        # return the result as a Series, which is by definition a copy
        result = type(self._parent)(
            result, index=index, name=self._parent.name
        ).__finalize__(self._parent)

        return result

    def to_pytimedelta(self):
        return cast(ArrowExtensionArray, self._parent.array)._dt_to_pytimedelta()

    def to_pydatetime(self):
        # GH#20306
        warnings.warn(
            f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "
            "in a future version this will return a Series containing python "
            "datetime objects instead of an ndarray. To retain the old behavior, "
            "call `np.array` on the result",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return cast(ArrowExtensionArray, self._parent.array)._dt_to_pydatetime()

    def isocalendar(self) -> DataFrame:
        from pandas import DataFrame

        result = (
            cast(ArrowExtensionArray, self._parent.array)
            ._dt_isocalendar()
            ._pa_array.combine_chunks()
        )
        iso_calendar_df = DataFrame(
            {
                col: type(self._parent.array)(result.field(i))  # type: ignore[call-arg]
                for i, col in enumerate(["year", "week", "day"])
            }
        )
        return iso_calendar_df

    @property
    def components(self) -> DataFrame:
        from pandas import DataFrame

        components_df = DataFrame(
            {
                col: getattr(self._parent.array, f"_dt_{col}")
                for col in [
                    "days",
                    "hours",
                    "minutes",
                    "seconds",
                    "milliseconds",
                    "microseconds",
                    "nanoseconds",
                ]
            }
        )
        return components_df


@delegate_names(
    delegate=DatetimeArray,
    accessors=DatetimeArray._datetimelike_ops + ["unit"],
    typ="property",
)
@delegate_names(
    delegate=DatetimeArray,
    accessors=DatetimeArray._datetimelike_methods + ["as_unit"],
    typ="method",
)
class DatetimeProperties(Properties):
    """
    Accessor object for datetimelike properties of the Series values.

    Examples
    --------
    >>> seconds_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="s"))
    >>> seconds_series
    0   2000-01-01 00:00:00
    1   2000-01-01 00:00:01
    2   2000-01-01 00:00:02
    dtype: datetime64[ns]
    >>> seconds_series.dt.second
    0    0
    1    1
    2    2
    dtype: int32

    >>> hours_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="h"))
    >>> hours_series
    0   2000-01-01 00:00:00
    1   2000-01-01 01:00:00
    2   2000-01-01 02:00:00
    dtype: datetime64[ns]
    >>> hours_series.dt.hour
    0    0
    1    1
    2    2
    dtype: int32

    >>> quarters_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="QE"))
    >>> quarters_series
    0   2000-03-31
    1   2000-06-30
    2   2000-09-30
    dtype: datetime64[ns]
    >>> quarters_series.dt.quarter
    0    1
    1    2
    2    3
    dtype: int32

    Returns a Series indexed like the original Series.
    Raises TypeError if the Series does not contain datetimelike values.
    """

    def to_pydatetime(self) -> np.ndarray:
        """
        Return the data as an array of :class:`datetime.datetime` objects.

        .. deprecated:: 2.1.0

            The current behavior of dt.to_pydatetime is deprecated.
            In a future version this will return a Series containing python
            datetime objects instead of an ndarray.

        Timezone information is retained if present.

        .. warning::

            Python's datetime uses microsecond resolution, which is lower than
            pandas (nanosecond). The values are truncated.

        Returns
        -------
        numpy.ndarray
            Object dtype array containing native Python datetime objects.

        See Also
        --------
        datetime.datetime : Standard library value for a datetime.

        Examples
        --------
        >>> s = pd.Series(pd.date_range('20180310', periods=2))
        >>> s
        0   2018-03-10
        1   2018-03-11
        dtype: datetime64[ns]

        >>> s.dt.to_pydatetime()
        array([datetime.datetime(2018, 3, 10, 0, 0),
               datetime.datetime(2018, 3, 11, 0, 0)], dtype=object)

        pandas' nanosecond precision is truncated to microseconds.

        >>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns'))
        >>> s
        0   2018-03-10 00:00:00.000000000
        1   2018-03-10 00:00:00.000000001
        dtype: datetime64[ns]

        >>> s.dt.to_pydatetime()
        array([datetime.datetime(2018, 3, 10, 0, 0),
               datetime.datetime(2018, 3, 10, 0, 0)], dtype=object)
        """
        # GH#20306
        warnings.warn(
            f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "
            "in a future version this will return a Series containing python "
            "datetime objects instead of an ndarray. To retain the old behavior, "
            "call `np.array` on the result",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self._get_values().to_pydatetime()

    @property
    def freq(self):
        return self._get_values().inferred_freq

    def isocalendar(self) -> DataFrame:
        """
        Calculate year, week, and day according to the ISO 8601 standard.

        Returns
        -------
        DataFrame
            With columns year, week and day.

        See Also
        --------
        Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
            week number, and weekday for the given Timestamp object.
        datetime.date.isocalendar : Return a named tuple object with
            three components: year, week and weekday.

        Examples
        --------
        >>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT]))
        >>> ser.dt.isocalendar()
           year  week  day
        0  2009    53    5
        1  <NA>  <NA> <NA>
        >>> ser.dt.isocalendar().week
        0      53
        1    <NA>
        Name: week, dtype: UInt32
        """
        return self._get_values().isocalendar().set_index(self._parent.index)


@delegate_names(
    delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"
)
@delegate_names(
    delegate=TimedeltaArray,
    accessors=TimedeltaArray._datetimelike_methods,
    typ="method",
)
class TimedeltaProperties(Properties):
    """
    Accessor object for datetimelike properties of the Series values.

    Returns a Series indexed like the original Series.
    Raises TypeError if the Series does not contain datetimelike values.

    Examples
    --------
    >>> seconds_series = pd.Series(
    ...     pd.timedelta_range(start="1 second", periods=3, freq="s")
    ... )
    >>> seconds_series
    0   0 days 00:00:01
    1   0 days 00:00:02
    2   0 days 00:00:03
    dtype: timedelta64[ns]
    >>> seconds_series.dt.seconds
    0    1
    1    2
    2    3
    dtype: int32
    """

    def to_pytimedelta(self) -> np.ndarray:
        """
        Return an array of native :class:`datetime.timedelta` objects.

        Python's standard `datetime` library uses a different representation
        for timedeltas. This method converts a Series of pandas Timedeltas
        to `datetime.timedelta` format with the same length as the original
        Series.

        Returns
        -------
        numpy.ndarray
            1D array containing data with `datetime.timedelta` type.

        See Also
        --------
        datetime.timedelta : A duration expressing the difference
            between two date, time, or datetime.

        Examples
        --------
        >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))
        >>> s
        0   0 days
        1   1 days
        2   2 days
        3   3 days
        4   4 days
        dtype: timedelta64[ns]

        >>> s.dt.to_pytimedelta()
        array([datetime.timedelta(0), datetime.timedelta(days=1),
               datetime.timedelta(days=2), datetime.timedelta(days=3),
               datetime.timedelta(days=4)], dtype=object)
        """
        return self._get_values().to_pytimedelta()

    @property
    def components(self):
        """
        Return a DataFrame of the components of the Timedeltas.

        Returns
        -------
        DataFrame

        Examples
        --------
        >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))
        >>> s
        0   0 days 00:00:00
        1   0 days 00:00:01
        2   0 days 00:00:02
        3   0 days 00:00:03
        4   0 days 00:00:04
        dtype: timedelta64[ns]
        >>> s.dt.components
           days  hours  minutes  seconds  milliseconds  microseconds  nanoseconds
        0     0      0        0        0             0             0            0
        1     0      0        0        1             0             0            0
        2     0      0        0        2             0             0            0
        3     0      0        0        3             0             0            0
        4     0      0        0        4             0             0            0
        """
        return (
            self._get_values()
            .components.set_index(self._parent.index)
            .__finalize__(self._parent)
        )

    @property
    def freq(self):
        return self._get_values().inferred_freq


@delegate_names(
    delegate=PeriodArray, accessors=PeriodArray._datetimelike_ops, typ="property"
)
@delegate_names(
    delegate=PeriodArray, accessors=PeriodArray._datetimelike_methods, typ="method"
)
class PeriodProperties(Properties):
    """
    Accessor object for datetimelike properties of the Series values.

    Returns a Series indexed like the original Series.
    Raises TypeError if the Series does not contain datetimelike values.

    Examples
    --------
    >>> seconds_series = pd.Series(
    ...     pd.period_range(
    ...         start="2000-01-01 00:00:00", end="2000-01-01 00:00:03", freq="s"
    ...     )
    ... )
    >>> seconds_series
    0    2000-01-01 00:00:00
    1    2000-01-01 00:00:01
    2    2000-01-01 00:00:02
    3    2000-01-01 00:00:03
    dtype: period[s]
    >>> seconds_series.dt.second
    0    0
    1    1
    2    2
    3    3
    dtype: int64

    >>> hours_series = pd.Series(
    ...     pd.period_range(start="2000-01-01 00:00", end="2000-01-01 03:00", freq="h")
    ... )
    >>> hours_series
    0    2000-01-01 00:00
    1    2000-01-01 01:00
    2    2000-01-01 02:00
    3    2000-01-01 03:00
    dtype: period[h]
    >>> hours_series.dt.hour
    0    0
    1    1
    2    2
    3    3
    dtype: int64

    >>> quarters_series = pd.Series(
    ...     pd.period_range(start="2000-01-01", end="2000-12-31", freq="Q-DEC")
    ... )
    >>> quarters_series
    0    2000Q1
    1    2000Q2
    2    2000Q3
    3    2000Q4
    dtype: period[Q-DEC]
    >>> quarters_series.dt.quarter
    0    1
    1    2
    2    3
    3    4
    dtype: int64
    """


class CombinedDatetimelikeProperties(
    DatetimeProperties, TimedeltaProperties, PeriodProperties
):
    def __new__(cls, data: Series):  # pyright: ignore[reportInconsistentConstructor]
        # CombinedDatetimelikeProperties isn't really instantiated. Instead
        # we need to choose which parent (datetime or timedelta) is
        # appropriate. Since we're checking the dtypes anyway, we'll just
        # do all the validation here.

        if not isinstance(data, ABCSeries):
            raise TypeError(
                f"cannot convert an object of type {type(data)} to a datetimelike index"
            )

        orig = data if isinstance(data.dtype, CategoricalDtype) else None
        if orig is not None:
            data = data._constructor(
                orig.array,
                name=orig.name,
                copy=False,
                dtype=orig._values.categories.dtype,
                index=orig.index,
            )

        if isinstance(data.dtype, ArrowDtype) and data.dtype.kind in "Mm":
            return ArrowTemporalProperties(data, orig)
        if lib.is_np_dtype(data.dtype, "M"):
            return DatetimeProperties(data, orig)
        elif isinstance(data.dtype, DatetimeTZDtype):
            return DatetimeProperties(data, orig)
        elif lib.is_np_dtype(data.dtype, "m"):
            return TimedeltaProperties(data, orig)
        elif isinstance(data.dtype, PeriodDtype):
            return PeriodProperties(data, orig)

        raise AttributeError("Can only use .dt accessor with datetimelike values")
llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/api.py
ADDED
@@ -0,0 +1,388 @@
from __future__ import annotations

import textwrap
from typing import (
    TYPE_CHECKING,
    cast,
)

import numpy as np

from pandas._libs import (
    NaT,
    lib,
)
from pandas.errors import InvalidIndexError

from pandas.core.dtypes.cast import find_common_type

from pandas.core.algorithms import safe_sort
from pandas.core.indexes.base import (
    Index,
    _new_Index,
    ensure_index,
    ensure_index_from_sequences,
    get_unanimous_names,
)
from pandas.core.indexes.category import CategoricalIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.interval import IntervalIndex
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.range import RangeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex

if TYPE_CHECKING:
    from pandas._typing import Axis

_sort_msg = textwrap.dedent(
    """\
    Sorting because non-concatenation axis is not aligned. A future version
    of pandas will change to not sort by default.

    To accept the future behavior, pass 'sort=False'.

    To retain the current behavior and silence the warning, pass 'sort=True'.
    """
)


__all__ = [
    "Index",
    "MultiIndex",
    "CategoricalIndex",
    "IntervalIndex",
    "RangeIndex",
    "InvalidIndexError",
    "TimedeltaIndex",
    "PeriodIndex",
    "DatetimeIndex",
    "_new_Index",
    "NaT",
    "ensure_index",
    "ensure_index_from_sequences",
    "get_objs_combined_axis",
    "union_indexes",
    "get_unanimous_names",
    "all_indexes_same",
    "default_index",
    "safe_sort_index",
]


def get_objs_combined_axis(
    objs,
    intersect: bool = False,
    axis: Axis = 0,
    sort: bool = True,
    copy: bool = False,
) -> Index:
    """
    Extract combined index: return intersection or union (depending on the
    value of "intersect") of indexes on given axis, or None if all objects
    lack indexes (e.g. they are numpy arrays).

    Parameters
    ----------
    objs : list
        Series or DataFrame objects, may be mix of the two.
    intersect : bool, default False
        If True, calculate the intersection between indexes. Otherwise,
        calculate the union.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to extract indexes from.
    sort : bool, default True
        Whether the result index should come out sorted or not.
    copy : bool, default False
        If True, return a copy of the combined index.

    Returns
    -------
    Index
    """
    obs_idxes = [obj._get_axis(axis) for obj in objs]
    return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy)


def _get_distinct_objs(objs: list[Index]) -> list[Index]:
    """
    Return a list with distinct elements of "objs" (different ids).
    Preserves order.
    """
    ids: set[int] = set()
    res = []
    for obj in objs:
        if id(obj) not in ids:
            ids.add(id(obj))
            res.append(obj)
    return res


def _get_combined_index(
    indexes: list[Index],
    intersect: bool = False,
    sort: bool = False,
    copy: bool = False,
) -> Index:
    """
    Return the union or intersection of indexes.

    Parameters
    ----------
    indexes : list of Index or list objects
        When intersect=True, do not accept list of lists.
    intersect : bool, default False
        If True, calculate the intersection between indexes. Otherwise,
        calculate the union.
    sort : bool, default False
        Whether the result index should come out sorted or not.
    copy : bool, default False
        If True, return a copy of the combined index.

    Returns
    -------
    Index
    """
    # TODO: handle index names!
    indexes = _get_distinct_objs(indexes)
    if len(indexes) == 0:
        index = Index([])
    elif len(indexes) == 1:
        index = indexes[0]
    elif intersect:
        index = indexes[0]
        for other in indexes[1:]:
            index = index.intersection(other)
    else:
        index = union_indexes(indexes, sort=False)
        index = ensure_index(index)

    if sort:
        index = safe_sort_index(index)
    # GH 29879
    if copy:
        index = index.copy()

    return index


def safe_sort_index(index: Index) -> Index:
    """
    Returns the sorted index.

    We keep the dtypes and the name attributes.

    Parameters
    ----------
    index : an Index

    Returns
    -------
    Index
    """
    if index.is_monotonic_increasing:
        return index

    try:
        array_sorted = safe_sort(index)
    except TypeError:
        pass
    else:
        if isinstance(array_sorted, Index):
            return array_sorted

        array_sorted = cast(np.ndarray, array_sorted)
        if isinstance(index, MultiIndex):
            index = MultiIndex.from_tuples(array_sorted, names=index.names)
        else:
            index = Index(array_sorted, name=index.name, dtype=index.dtype)

    return index


def union_indexes(indexes, sort: bool | None = True) -> Index:
    """
    Return the union of indexes.

    The behavior of sort and names is not consistent.

    Parameters
    ----------
    indexes : list of Index or list objects
    sort : bool, default True
        Whether the result index should come out sorted or not.

    Returns
    -------
    Index
    """
    if len(indexes) == 0:
        raise AssertionError("Must have at least 1 Index to union")
    if len(indexes) == 1:
        result = indexes[0]
        if isinstance(result, list):
            if not sort:
                result = Index(result)
            else:
                result = Index(sorted(result))
        return result

    indexes, kind = _sanitize_and_check(indexes)

    def _unique_indices(inds, dtype) -> Index:
        """
        Concatenate indices and remove duplicates.

        Parameters
        ----------
        inds : list of Index or list objects
        dtype : dtype to set for the resulting Index

        Returns
        -------
        Index
        """
        if all(isinstance(ind, Index) for ind in inds):
            inds = [ind.astype(dtype, copy=False) for ind in inds]
            result = inds[0].unique()
            other = inds[1].append(inds[2:])
            diff = other[result.get_indexer_for(other) == -1]
            if len(diff):
                result = result.append(diff.unique())
            if sort:
                result = result.sort_values()
            return result

        def conv(i):
            if isinstance(i, Index):
                i = i.tolist()
            return i

        return Index(
            lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort),
            dtype=dtype,
        )

    def _find_common_index_dtype(inds):
        """
        Finds a common type for the indexes to pass through to resulting index.

        Parameters
        ----------
        inds: list of Index or list objects

        Returns
        -------
        The common type or None if no indexes were given
        """
        dtypes = [idx.dtype for idx in indexes if isinstance(idx, Index)]
        if dtypes:
            dtype = find_common_type(dtypes)
        else:
            dtype = None

        return dtype

    if kind == "special":
        result = indexes[0]

        dtis = [x for x in indexes if isinstance(x, DatetimeIndex)]
        dti_tzs = [x for x in dtis if x.tz is not None]
        if len(dti_tzs) not in [0, len(dtis)]:
            # TODO: this behavior is not tested (so may not be desired),
            #  but is kept in order to keep behavior the same when
            #  deprecating union_many
            #  test_frame_from_dict_with_mixed_indexes
            raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")

        if len(dtis) == len(indexes):
            sort = True
            result = indexes[0]

        elif len(dtis) > 1:
            # If we have mixed timezones, our casting behavior may depend on
            #  the order of indexes, which we don't want.
            sort = False

            # TODO: what about Categorical[dt64]?
            #  test_frame_from_dict_with_mixed_indexes
            indexes = [x.astype(object, copy=False) for x in indexes]
            result = indexes[0]

        for other in indexes[1:]:
            result = result.union(other, sort=None if sort else False)
        return result

    elif kind == "array":
        dtype = _find_common_index_dtype(indexes)
        index = indexes[0]
        if not all(index.equals(other) for other in indexes[1:]):
            index = _unique_indices(indexes, dtype)

        name = get_unanimous_names(*indexes)[0]
        if name != index.name:
            index = index.rename(name)
        return index
    else:  # kind='list'
        dtype = _find_common_index_dtype(indexes)
        return _unique_indices(indexes, dtype)


def _sanitize_and_check(indexes):
    """
    Verify the type of indexes and convert lists to Index.

    Cases:

    - [list, list, ...]: Return ([list, list, ...], 'list')
    - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
      Lists are sorted and converted to Index.
    - [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
      TYPE = 'special' if at least one special type, 'array' otherwise.

    Parameters
    ----------
    indexes : list of Index or list objects

    Returns
    -------
    sanitized_indexes : list of Index or list objects
    type : {'list', 'array', 'special'}
    """
    kinds = list({type(index) for index in indexes})

    if list in kinds:
        if len(kinds) > 1:
            indexes = [
                Index(list(x)) if not isinstance(x, Index) else x for x in indexes
            ]
            kinds.remove(list)
        else:
            return indexes, "list"

    if len(kinds) > 1 or Index not in kinds:
        return indexes, "special"
    else:
        return indexes, "array"


def all_indexes_same(indexes) -> bool:
    """
    Determine if all indexes contain the same elements.

    Parameters
    ----------
    indexes : iterable of Index objects

    Returns
    -------
    bool
        True if all indexes contain the same elements, False otherwise.
    """
    itr = iter(indexes)
    first = next(itr)
    return all(first.equals(index) for index in itr)


def default_index(n: int) -> RangeIndex:
    rng = range(n)
    return RangeIndex._simple_new(rng, name=None)
llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/base.py
ADDED
The diff for this file is too large to render.
llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/category.py
ADDED
@@ -0,0 +1,513 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    cast,
)

import numpy as np

from pandas._libs import index as libindex
from pandas.util._decorators import (
    cache_readonly,
    doc,
)

from pandas.core.dtypes.common import is_scalar
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import (
    is_valid_na_for_dtype,
    isna,
)

from pandas.core.arrays.categorical import (
    Categorical,
    contains,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import (
    Index,
    maybe_extract_name,
)
from pandas.core.indexes.extension import (
    NDArrayBackedExtensionIndex,
    inherit_names,
)

if TYPE_CHECKING:
    from collections.abc import Hashable

    from pandas._typing import (
        Dtype,
        DtypeObj,
        Self,
        npt,
    )


@inherit_names(
    [
        "argsort",
        "tolist",
        "codes",
        "categories",
        "ordered",
        "_reverse_indexer",
        "searchsorted",
        "min",
        "max",
    ],
    Categorical,
)
@inherit_names(
    [
        "rename_categories",
        "reorder_categories",
        "add_categories",
        "remove_categories",
        "remove_unused_categories",
        "set_categories",
        "as_ordered",
        "as_unordered",
    ],
    Categorical,
    wrap=True,
)
class CategoricalIndex(NDArrayBackedExtensionIndex):
    """
    Index based on an underlying :class:`Categorical`.

    CategoricalIndex, like Categorical, can only take on a limited,
    and usually fixed, number of possible values (`categories`). Also,
    like Categorical, it might have an order, but numerical operations
    (additions, divisions, ...) are not possible.

    Parameters
    ----------
    data : array-like (1-dimensional)
        The values of the categorical. If `categories` are given, values not in
        `categories` will be replaced with NaN.
    categories : index-like, optional
        The categories for the categorical. Items need to be unique.
        If the categories are not given here (and also not in `dtype`), they
        will be inferred from the `data`.
    ordered : bool, optional
        Whether or not this categorical is treated as an ordered
        categorical. If not given here or in `dtype`, the resulting
        categorical will be unordered.
    dtype : CategoricalDtype or "category", optional
        If :class:`CategoricalDtype`, cannot be used together with
        `categories` or `ordered`.
    copy : bool, default False
        Make a copy of input ndarray.
    name : object, optional
        Name to be stored in the index.

    Attributes
    ----------
    codes
    categories
    ordered

    Methods
    -------
    rename_categories
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories
    as_ordered
    as_unordered
    map

    Raises
    ------
    ValueError
        If the categories do not validate.
    TypeError
        If an explicit ``ordered=True`` is given but no `categories` and the
        `values` are not sortable.

    See Also
    --------
    Index : The base pandas Index type.
    Categorical : A categorical array.
    CategoricalDtype : Type for categorical data.

    Notes
    -----
    See the `user guide
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__
    for more.

    Examples
    --------
    >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    ``CategoricalIndex`` can also be instantiated from a ``Categorical``:

    >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
    >>> pd.CategoricalIndex(c)
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    Ordered ``CategoricalIndex`` can have a min and max value.

    >>> ci = pd.CategoricalIndex(
    ...     ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
    ... )
    >>> ci
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['c', 'b', 'a'], ordered=True, dtype='category')
    >>> ci.min()
    'c'
    """

    _typ = "categoricalindex"
    _data_cls = Categorical

    @property
    def _can_hold_strings(self):
        return self.categories._can_hold_strings

    @cache_readonly
    def _should_fallback_to_positional(self) -> bool:
        return self.categories._should_fallback_to_positional

    codes: np.ndarray
    categories: Index
    ordered: bool | None
    _data: Categorical
    _values: Categorical

    @property
    def _engine_type(self) -> type[libindex.IndexEngine]:
        # self.codes can have dtype int8, int16, int32 or int64, so we need
        # to return the corresponding engine type (libindex.Int8Engine, etc.).
        return {
            np.int8: libindex.Int8Engine,
            np.int16: libindex.Int16Engine,
            np.int32: libindex.Int32Engine,
            np.int64: libindex.Int64Engine,
        }[self.codes.dtype.type]

    # --------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        data=None,
        categories=None,
        ordered=None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable | None = None,
    ) -> Self:
        name = maybe_extract_name(name, data, cls)

        if is_scalar(data):
            # GH#38944 include None here, which pre-2.0 subbed in []
            cls._raise_scalar_data_error(data)

        data = Categorical(
            data, categories=categories, ordered=ordered, dtype=dtype, copy=copy
        )

        return cls._simple_new(data, name=name)

    # --------------------------------------------------------------------

    def _is_dtype_compat(self, other: Index) -> Categorical:
        """
        *this is an internal non-public method*

        provide a comparison between the dtype of self and other (coercing if
        needed)

        Parameters
        ----------
        other : Index

        Returns
        -------
        Categorical

        Raises
        ------
        TypeError if the dtypes are not compatible
        """
        if isinstance(other.dtype, CategoricalDtype):
            cat = extract_array(other)
            cat = cast(Categorical, cat)
            if not cat._categories_match_up_to_permutation(self._values):
                raise TypeError(
                    "categories must match existing categories when appending"
                )

        elif other._is_multi:
            # preempt raising NotImplementedError in isna call
            raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex")
        else:
            values = other

            cat = Categorical(other, dtype=self.dtype)
            other = CategoricalIndex(cat)
            if not other.isin(values).all():
                raise TypeError(
                    "cannot append a non-category item to a CategoricalIndex"
                )
            cat = other._values

            if not ((cat == values) | (isna(cat) & isna(values))).all():
                # GH#37667 see test_equals_non_category
                raise TypeError(
                    "categories must match existing categories when appending"
                )

        return cat

    def equals(self, other: object) -> bool:
        """
        Determine if two CategoricalIndex objects contain the same elements.

        Returns
        -------
        bool
            ``True`` if two :class:`pandas.CategoricalIndex` objects have equal
            elements, ``False`` otherwise.

        Examples
        --------
        >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
        >>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']))
        >>> ci.equals(ci2)
        True

        The order of elements matters.

        >>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c'])
        >>> ci.equals(ci3)
        False

        The orderedness also matters.

        >>> ci4 = ci.as_ordered()
        >>> ci.equals(ci4)
        False

        The categories matter, but the order of the categories matters only when
        ``ordered=True``.

        >>> ci5 = ci.set_categories(['a', 'b', 'c', 'd'])
        >>> ci.equals(ci5)
        False

        >>> ci6 = ci.set_categories(['b', 'c', 'a'])
        >>> ci.equals(ci6)
        True
        >>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
        ...                                  ordered=True)
        >>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a'])
        >>> ci_ordered.equals(ci2_ordered)
        False
        """
        if self.is_(other):
            return True

        if not isinstance(other, Index):
            return False

        try:
            other = self._is_dtype_compat(other)
        except (TypeError, ValueError):
            return False

        return self._data.equals(other)

    # --------------------------------------------------------------------
    # Rendering Methods

    @property
    def _formatter_func(self):
        return self.categories._formatter_func

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs: list[tuple[str, str | int | bool | None]]

        attrs = [
            (
                "categories",
                f"[{', '.join(self._data._repr_categories())}]",
            ),
            ("ordered", self.ordered),
        ]
        extra = super()._format_attrs()
        return attrs + extra

    # --------------------------------------------------------------------

    @property
    def inferred_type(self) -> str:
        return "categorical"

    @doc(Index.__contains__)
    def __contains__(self, key: Any) -> bool:
        # if key is a NaN, check if any NaN is in self.
        if is_valid_na_for_dtype(key, self.categories.dtype):
            return self.hasnans

        return contains(self, key, container=self._engine)

    def reindex(
        self, target, method=None, level=None, limit: int | None = None, tolerance=None
    ) -> tuple[Index, npt.NDArray[np.intp] | None]:
        """
        Create index with target's values (move/add/delete values as necessary)

        Returns
        -------
        new_index : pd.Index
            Resulting index
        indexer : np.ndarray[np.intp] or None
            Indices of output values in original index

        """
        if method is not None:
            raise NotImplementedError(
                "argument method is not implemented for CategoricalIndex.reindex"
            )
        if level is not None:
            raise NotImplementedError(
                "argument level is not implemented for CategoricalIndex.reindex"
            )
        if limit is not None:
            raise NotImplementedError(
                "argument limit is not implemented for CategoricalIndex.reindex"
            )
        return super().reindex(target)

    # --------------------------------------------------------------------
    # Indexing Methods

    def _maybe_cast_indexer(self, key) -> int:
        # GH#41933: we have to do this instead of self._data._validate_scalar
        #  because this will correctly get partial-indexing on Interval categories
        try:
            return self._data._unbox_scalar(key)
        except KeyError:
            if is_valid_na_for_dtype(key, self.categories.dtype):
                return -1
            raise

    def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex:
        if isinstance(values, CategoricalIndex):
            values = values._data
        if isinstance(values, Categorical):
            # Indexing on codes is more efficient if categories are the same,
            #  so we can apply some optimizations based on the degree of
            #  dtype-matching.
            cat = self._data._encode_with_my_categories(values)
            codes = cat._codes
        else:
            codes = self.categories.get_indexer(values)
            codes = codes.astype(self.codes.dtype, copy=False)
            cat = self._data._from_backing_data(codes)
        return type(self)._simple_new(cat)

    # --------------------------------------------------------------------

    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
        return self.categories._is_comparable_dtype(dtype)

    def map(self, mapper, na_action: Literal["ignore"] | None = None):
        """
        Map values using an input mapping or function.

        Maps the values (their categories, not the codes) of the index to new
        categories. If the mapping correspondence is one-to-one the result is a
        :class:`~pandas.CategoricalIndex` which has the same order property as
        the original, otherwise an :class:`~pandas.Index` is returned.

        If a `dict` or :class:`~pandas.Series` is used any unmapped category is
        mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
        will be returned.

        Parameters
        ----------
        mapper : function, dict, or Series
            Mapping correspondence.

        Returns
        -------
        pandas.CategoricalIndex or pandas.Index
            Mapped index.

        See Also
        --------
        Index.map : Apply a mapping correspondence on an
            :class:`~pandas.Index`.
        Series.map : Apply a mapping correspondence on a
            :class:`~pandas.Series`.
        Series.apply : Apply more complex functions on a
            :class:`~pandas.Series`.

        Examples
        --------
        >>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
        >>> idx
        CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
                          ordered=False, dtype='category')
        >>> idx.map(lambda x: x.upper())
        CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
                         ordered=False, dtype='category')
        >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
        CategoricalIndex(['first', 'second', 'third'], categories=['first',
                         'second', 'third'], ordered=False, dtype='category')

        If the mapping is one-to-one the ordering of the categories is
        preserved:

        >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
        >>> idx
        CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
                         ordered=True, dtype='category')
        >>> idx.map({'a': 3, 'b': 2, 'c': 1})
        CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
                         dtype='category')

        If the mapping is not one-to-one an :class:`~pandas.Index` is returned:

        >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
        Index(['first', 'second', 'first'], dtype='object')

        If a `dict` is used, all unmapped categories are mapped to `NaN` and
        the result is an :class:`~pandas.Index`:

        >>> idx.map({'a': 'first', 'b': 'second'})
        Index(['first', 'second', nan], dtype='object')
        """
        mapped = self._values.map(mapper, na_action=na_action)
        return Index(mapped, name=self.name)

    def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
        # if calling index is category, don't check dtype of others
        try:
            cat = Categorical._concat_same_type(
                [self._is_dtype_compat(c) for c in to_concat]
            )
        except TypeError:
            # not all to_concat elements are among our categories (or NA)

            res = concat_compat([x._values for x in to_concat])
            return Index(res, name=name)
        else:
            return type(self)._simple_new(cat, name=name)
llmeval-env/lib/python3.10/site-packages/pandas/core/indexes/datetimelike.py
ADDED
@@ -0,0 +1,843 @@
"""
Base and utility classes for tseries type pandas objects.
"""
from __future__ import annotations

from abc import (
    ABC,
    abstractmethod,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    cast,
    final,
)
import warnings

import numpy as np

from pandas._config import using_copy_on_write

from pandas._libs import (
    NaT,
    Timedelta,
    lib,
)
from pandas._libs.tslibs import (
    BaseOffset,
    Resolution,
    Tick,
    parsing,
    to_offset,
)
from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
from pandas.compat.numpy import function as nv
from pandas.errors import (
    InvalidIndexError,
    NullFrequencyError,
)
from pandas.util._decorators import (
    Appender,
    cache_readonly,
    doc,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.common import (
    is_integer,
    is_list_like,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import CategoricalDtype

from pandas.core.arrays import (
    DatetimeArray,
    ExtensionArray,
    PeriodArray,
    TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
    Index,
    _index_shared_docs,
)
from pandas.core.indexes.extension import NDArrayBackedExtensionIndex
from pandas.core.indexes.range import RangeIndex
from pandas.core.tools.timedeltas import to_timedelta

if TYPE_CHECKING:
    from collections.abc import Sequence
    from datetime import datetime

    from pandas._typing import (
        Axis,
        Self,
        npt,
    )

    from pandas import CategoricalIndex

_index_doc_kwargs = dict(ibase._index_doc_kwargs)

class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC):
    """
    Common ops mixin to support a unified interface datetimelike Index.
    """

    _can_hold_strings = False
    _data: DatetimeArray | TimedeltaArray | PeriodArray

    @doc(DatetimeLikeArrayMixin.mean)
    def mean(self, *, skipna: bool = True, axis: int | None = 0):
        return self._data.mean(skipna=skipna, axis=axis)

    @property
    def freq(self) -> BaseOffset | None:
        return self._data.freq

    @freq.setter
    def freq(self, value) -> None:
        # error: Property "freq" defined in "PeriodArray" is read-only [misc]
        self._data.freq = value  # type: ignore[misc]

    @property
    def asi8(self) -> npt.NDArray[np.int64]:
        return self._data.asi8

    @property
    @doc(DatetimeLikeArrayMixin.freqstr)
    def freqstr(self) -> str:
        from pandas import PeriodIndex

        if self._data.freqstr is not None and isinstance(
            self._data, (PeriodArray, PeriodIndex)
        ):
            freq = freq_to_period_freqstr(self._data.freq.n, self._data.freq.name)
            return freq
        else:
            return self._data.freqstr  # type: ignore[return-value]

    @cache_readonly
    @abstractmethod
    def _resolution_obj(self) -> Resolution:
        ...

    @cache_readonly
    @doc(DatetimeLikeArrayMixin.resolution)
    def resolution(self) -> str:
        return self._data.resolution

    # ------------------------------------------------------------------------

    @cache_readonly
    def hasnans(self) -> bool:
        return self._data._hasna

    def equals(self, other: Any) -> bool:
        """
        Determines if two Index objects contain the same elements.
        """
        if self.is_(other):
            return True

        if not isinstance(other, Index):
            return False
        elif other.dtype.kind in "iufc":
            return False
        elif not isinstance(other, type(self)):
            should_try = False
            inferable = self._data._infer_matches
            if other.dtype == object:
                should_try = other.inferred_type in inferable
            elif isinstance(other.dtype, CategoricalDtype):
                other = cast("CategoricalIndex", other)
                should_try = other.categories.inferred_type in inferable

            if should_try:
                try:
                    other = type(self)(other)
                except (ValueError, TypeError, OverflowError):
                    # e.g.
                    #  ValueError -> cannot parse str entry, or OutOfBoundsDatetime
                    #  TypeError -> trying to convert IntervalIndex to DatetimeIndex
                    #  OverflowError -> Index([very_large_timedeltas])
                    return False

        if self.dtype != other.dtype:
            # have different timezone
            return False

        return np.array_equal(self.asi8, other.asi8)

    @Appender(Index.__contains__.__doc__)
    def __contains__(self, key: Any) -> bool:
        hash(key)
        try:
            self.get_loc(key)
        except (KeyError, TypeError, ValueError, InvalidIndexError):
            return False
        return True

    def _convert_tolerance(self, tolerance, target):
        tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
        return super()._convert_tolerance(tolerance, target)

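Because `equals` re-infers candidate objects before comparing `asi8` values, an object-dtype index of timestamps can compare equal to a proper `DatetimeIndex`, while a timezone mismatch short-circuits at the dtype check. A small illustration (hypothetical values, behaviour as in pandas 2.x):

import pandas as pd

left = pd.date_range("2021-01-01", periods=3, freq="D")
right = pd.DatetimeIndex(["2021-01-01", "2021-01-02", "2021-01-03"])

print(left.equals(right))                     # True: identical i8 values
print(left.equals(right.astype(object)))      # True: object index is re-inferred
print(left.equals(right.tz_localize("UTC")))  # False: self.dtype != other.dtype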
    # --------------------------------------------------------------------
    # Rendering Methods
    _default_na_rep = "NaT"

    def format(
        self,
        name: bool = False,
        formatter: Callable | None = None,
        na_rep: str = "NaT",
        date_format: str | None = None,
    ) -> list[str]:
        """
        Render a string representation of the Index.
        """
        warnings.warn(
            # GH#55413
            f"{type(self).__name__}.format is deprecated and will be removed "
            "in a future version. Convert using index.astype(str) or "
            "index.map(formatter) instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        header = []
        if name:
            header.append(
                ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
                if self.name is not None
                else ""
            )

        if formatter is not None:
            return header + list(self.map(formatter))

        return self._format_with_header(
            header=header, na_rep=na_rep, date_format=date_format
        )

    def _format_with_header(
        self, *, header: list[str], na_rep: str, date_format: str | None = None
    ) -> list[str]:
        # TODO: not reached in tests 2023-10-11
        # matches base class except for whitespace padding and date_format
        return header + list(
            self._get_values_for_csv(na_rep=na_rep, date_format=date_format)
        )

    @property
    def _formatter_func(self):
        return self._data._formatter()

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr,formatted_value).
        """
        attrs = super()._format_attrs()
        for attrib in self._attributes:
            # iterating over _attributes prevents us from doing this for PeriodIndex
            if attrib == "freq":
                freq = self.freqstr
                if freq is not None:
                    freq = repr(freq)  # e.g. D -> 'D'
                attrs.append(("freq", freq))
        return attrs

    @Appender(Index._summary.__doc__)
    def _summary(self, name=None) -> str:
        result = super()._summary(name=name)
        if self.freq:
            result += f"\nFreq: {self.freqstr}"

        return result

    # --------------------------------------------------------------------
    # Indexing Methods

    @final
    def _can_partial_date_slice(self, reso: Resolution) -> bool:
        # e.g. test_getitem_setitem_periodindex
        # History of conversation GH#3452, GH#3931, GH#2369, GH#14826
        return reso > self._resolution_obj
        # NB: for DTI/PI, not TDI

    def _parsed_string_to_bounds(self, reso: Resolution, parsed):
        raise NotImplementedError

    def _parse_with_reso(self, label: str):
        # overridden by TimedeltaIndex
        try:
            if self.freq is None or hasattr(self.freq, "rule_code"):
                freq = self.freq
        except NotImplementedError:
            freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))

        freqstr: str | None
        if freq is not None and not isinstance(freq, str):
            freqstr = freq.rule_code
        else:
            freqstr = freq

        if isinstance(label, np.str_):
            # GH#45580
            label = str(label)

        parsed, reso_str = parsing.parse_datetime_string_with_reso(label, freqstr)
        reso = Resolution.from_attrname(reso_str)
        return parsed, reso

    def _get_string_slice(self, key: str):
        # overridden by TimedeltaIndex
        parsed, reso = self._parse_with_reso(key)
        try:
            return self._partial_date_slice(reso, parsed)
        except KeyError as err:
            raise KeyError(key) from err

    @final
    def _partial_date_slice(
        self,
        reso: Resolution,
        parsed: datetime,
    ) -> slice | npt.NDArray[np.intp]:
        """
        Parameters
        ----------
        reso : Resolution
        parsed : datetime

        Returns
        -------
        slice or ndarray[intp]
        """
        if not self._can_partial_date_slice(reso):
            raise ValueError

        t1, t2 = self._parsed_string_to_bounds(reso, parsed)
        vals = self._data._ndarray
        unbox = self._data._unbox

        if self.is_monotonic_increasing:
            if len(self) and (
                (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
            ):
                # we are out of range
                raise KeyError

            # TODO: does this depend on being monotonic _increasing_?

            # a monotonic (sorted) series can be sliced
            left = vals.searchsorted(unbox(t1), side="left")
            right = vals.searchsorted(unbox(t2), side="right")
            return slice(left, right)

        else:
            lhs_mask = vals >= unbox(t1)
            rhs_mask = vals <= unbox(t2)

            # try to find the dates
            return (lhs_mask & rhs_mask).nonzero()[0]

    def _maybe_cast_slice_bound(self, label, side: str):
        """
        If label is a string, cast it to scalar type according to resolution.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        if isinstance(label, str):
            try:
                parsed, reso = self._parse_with_reso(label)
            except ValueError as err:
                # DTI -> parsing.DateParseError
                # TDI -> 'unit abbreviation w/o a number'
                # PI -> string cannot be parsed as datetime-like
                self._raise_invalid_indexer("slice", label, err)

            lower, upper = self._parsed_string_to_bounds(reso, parsed)
            return lower if side == "left" else upper
        elif not isinstance(label, self._data._recognized_scalars):
            self._raise_invalid_indexer("slice", label)

        return label

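These hooks are what make partial string indexing work on series with datetime-like indexes: a label coarser than the index resolution passes `_can_partial_date_slice`, and `_parsed_string_to_bounds` turns it into an interval. A rough illustration of the observable behaviour (hypothetical data, pandas 2.x offset aliases assumed):

import numpy as np
import pandas as pd

ser = pd.Series(np.arange(72), index=pd.date_range("2021-01-01", periods=72, freq="h"))

# Day-resolution label on an hourly index -> reso > _resolution_obj,
# so the whole day is selected via _partial_date_slice.
print(len(ser["2021-01-02"]))  # 24

# String slice bounds are cast by _maybe_cast_slice_bound.
print(len(ser["2021-01-02":"2021-01-03"]))  # 48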
    # --------------------------------------------------------------------
    # Arithmetic Methods

    def shift(self, periods: int = 1, freq=None) -> Self:
        """
        Shift index by desired number of time frequency increments.

        This method is for shifting the values of datetime-like indexes
        by a specified time increment a given number of times.

        Parameters
        ----------
        periods : int, default 1
            Number of periods (or increments) to shift by,
            can be positive or negative.
        freq : pandas.DateOffset, pandas.Timedelta or string, optional
            Frequency increment to shift by.
            If None, the index is shifted by its own `freq` attribute.
            Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.

        Returns
        -------
        pandas.DatetimeIndex
            Shifted index.

        See Also
        --------
        Index.shift : Shift values of Index.
        PeriodIndex.shift : Shift values of PeriodIndex.
        """
        raise NotImplementedError

    # --------------------------------------------------------------------

    @doc(Index._maybe_cast_listlike_indexer)
    def _maybe_cast_listlike_indexer(self, keyarr):
        try:
            res = self._data._validate_listlike(keyarr, allow_object=True)
        except (ValueError, TypeError):
            if not isinstance(keyarr, ExtensionArray):
                # e.g. we don't want to cast DTA to ndarray[object]
                res = com.asarray_tuplesafe(keyarr)
                # TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
            else:
                res = keyarr
        return Index(res, dtype=res.dtype)

class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC):
    """
    Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
    but not PeriodIndex
    """

    _data: DatetimeArray | TimedeltaArray
    _comparables = ["name", "freq"]
    _attributes = ["name", "freq"]

    # Compat for frequency inference, see GH#23789
    _is_monotonic_increasing = Index.is_monotonic_increasing
    _is_monotonic_decreasing = Index.is_monotonic_decreasing
    _is_unique = Index.is_unique

    @property
    def unit(self) -> str:
        return self._data.unit

    def as_unit(self, unit: str) -> Self:
        """
        Convert to a dtype with the given unit resolution.

        Parameters
        ----------
        unit : {'s', 'ms', 'us', 'ns'}

        Returns
        -------
        same type as self

        Examples
        --------
        For :class:`pandas.DatetimeIndex`:

        >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006'])
        >>> idx
        DatetimeIndex(['2020-01-02 01:02:03.004005006'],
                      dtype='datetime64[ns]', freq=None)
        >>> idx.as_unit('s')
        DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)

        For :class:`pandas.TimedeltaIndex`:

        >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
        >>> tdelta_idx
        TimedeltaIndex(['1 days 00:03:00.000002042'],
                        dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.as_unit('s')
        TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
        """
        arr = self._data.as_unit(unit)
        return type(self)._simple_new(arr, name=self.name)

    def _with_freq(self, freq):
        arr = self._data._with_freq(freq)
        return type(self)._simple_new(arr, name=self._name)

    @property
    def values(self) -> np.ndarray:
        # NB: For Datetime64TZ this is lossy
        data = self._data._ndarray
        if using_copy_on_write():
            data = data.view()
            data.flags.writeable = False
        return data

    @doc(DatetimeIndexOpsMixin.shift)
    def shift(self, periods: int = 1, freq=None) -> Self:
        if freq is not None and freq != self.freq:
            if isinstance(freq, str):
                freq = to_offset(freq)
            offset = periods * freq
            return self + offset

        if periods == 0 or len(self) == 0:
            # GH#14811 empty case
            return self.copy()

        if self.freq is None:
            raise NullFrequencyError("Cannot shift with no freq")

        start = self[0] + periods * self.freq
        end = self[-1] + periods * self.freq

        # Note: in the DatetimeTZ case, _generate_range will infer the
        #  appropriate timezone from `start` and `end`, so tz does not need
        #  to be passed explicitly.
        result = self._data._generate_range(
            start=start, end=end, periods=None, freq=self.freq, unit=self.unit
        )
        return type(self)._simple_new(result, name=self.name)

    @cache_readonly
    @doc(DatetimeLikeArrayMixin.inferred_freq)
    def inferred_freq(self) -> str | None:
        return self._data.inferred_freq

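The two branches of `shift` are easy to see from the outside: with an explicit `freq` differing from the index's own, it degenerates to `self + periods * freq`; otherwise it regenerates the range from shifted endpoints and requires a set freq. A short sketch under those assumptions:

import pandas as pd

idx = pd.date_range("2021-01-01", periods=3, freq="D")

print(idx.shift(2))            # two days later, freq='D' preserved
print(idx.shift(1, freq="h"))  # freq differs -> computed as idx + 1 hour

no_freq = pd.DatetimeIndex(["2021-01-05", "2021-01-01"])
try:
    no_freq.shift(1)
except pd.errors.NullFrequencyError as err:
    print(err)  # Cannot shift with no freq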
    # --------------------------------------------------------------------
    # Set Operation Methods

    @cache_readonly
    def _as_range_index(self) -> RangeIndex:
        # Convert our i8 representations to RangeIndex
        # Caller is responsible for checking isinstance(self.freq, Tick)
        freq = cast(Tick, self.freq)
        tick = Timedelta(freq).as_unit("ns")._value
        rng = range(self[0]._value, self[-1]._value + tick, tick)
        return RangeIndex(rng)

    def _can_range_setop(self, other) -> bool:
        return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)

    def _wrap_range_setop(self, other, res_i8) -> Self:
        new_freq = None
        if not len(res_i8):
            # RangeIndex defaults to step=1, which we don't want.
            new_freq = self.freq
        elif isinstance(res_i8, RangeIndex):
            new_freq = to_offset(Timedelta(res_i8.step))

        # TODO(GH#41493): we cannot just do
        #  type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
        # because test_setops_preserve_freq fails with _validate_frequency raising.
        # This raising is incorrect, as 'on_freq' is incorrect. This will
        # be fixed by GH#41493
        res_values = res_i8.values.view(self._data._ndarray.dtype)
        result = type(self._data)._simple_new(
            # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
            # incompatible type "Union[dtype[Any], ExtensionDtype]"; expected
            # "Union[dtype[datetime64], DatetimeTZDtype]"
            res_values,
            dtype=self.dtype,  # type: ignore[arg-type]
            freq=new_freq,  # type: ignore[arg-type]
        )
        return cast("Self", self._wrap_setop_result(other, result))

    def _range_intersect(self, other, sort) -> Self:
        # Dispatch to RangeIndex intersection logic.
        left = self._as_range_index
        right = other._as_range_index
        res_i8 = left.intersection(right, sort=sort)
        return self._wrap_range_setop(other, res_i8)

    def _range_union(self, other, sort) -> Self:
        # Dispatch to RangeIndex union logic.
        left = self._as_range_index
        right = other._as_range_index
        res_i8 = left.union(right, sort=sort)
        return self._wrap_range_setop(other, res_i8)

    def _intersection(self, other: Index, sort: bool = False) -> Index:
        """
        intersection specialized to the case with matching dtypes and both non-empty.
        """
        other = cast("DatetimeTimedeltaMixin", other)

        if self._can_range_setop(other):
            return self._range_intersect(other, sort=sort)

        if not self._can_fast_intersect(other):
            result = Index._intersection(self, other, sort=sort)
            # We need to invalidate the freq because Index._intersection
            #  uses _shallow_copy on a view of self._data, which will preserve
            #  self.freq if we're not careful.
            # At this point we should have result.dtype == self.dtype
            #  and type(result) is type(self._data)
            result = self._wrap_setop_result(other, result)
            return result._with_freq(None)._with_freq("infer")

        else:
            return self._fast_intersect(other, sort)

    def _fast_intersect(self, other, sort):
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self

        # after sorting, the intersection always starts with the right index
        # and ends with the index of which the last elements is smallest
        end = min(left[-1], right[-1])
        start = right[0]

        if end < start:
            result = self[:0]
        else:
            lslice = slice(*left.slice_locs(start, end))
            result = left._values[lslice]

        return result

    def _can_fast_intersect(self, other: Self) -> bool:
        # Note: we only get here with len(self) > 0 and len(other) > 0
        if self.freq is None:
            return False

        elif other.freq != self.freq:
            return False

        elif not self.is_monotonic_increasing:
            # Because freq is not None, we must then be monotonic decreasing
            return False

        # this along with matching freqs ensure that we "line up",
        #  so intersection will preserve freq
        # Note we are assuming away Ticks, as those go through _range_intersect
        # GH#42104
        return self.freq.n == 1

    def _can_fast_union(self, other: Self) -> bool:
        # Assumes that type(self) == type(other), as per the annotation
        # The ability to fast_union also implies that `freq` should be
        #  retained on union.
        freq = self.freq

        if freq is None or freq != other.freq:
            return False

        if not self.is_monotonic_increasing:
            # Because freq is not None, we must then be monotonic decreasing
            # TODO: do union on the reversed indexes?
            return False

        if len(self) == 0 or len(other) == 0:
            # only reached via union_many
            return True

        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self

        right_start = right[0]
        left_end = left[-1]

        # Only need to "adjoin", not overlap
        return (right_start == left_end + freq) or right_start in left

    def _fast_union(self, other: Self, sort=None) -> Self:
        # Caller is responsible for ensuring self and other are non-empty

        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        elif sort is False:
            # TDIs are not in the "correct" order and we don't want
            #  to sort but want to remove overlaps
            left, right = self, other
            left_start = left[0]
            loc = right.searchsorted(left_start, side="left")
            right_chunk = right._values[:loc]
            dates = concat_compat((left._values, right_chunk))
            result = type(self)._simple_new(dates, name=self.name)
            return result
        else:
            left, right = other, self

        left_end = left[-1]
        right_end = right[-1]

        # concatenate
        if left_end < right_end:
            loc = right.searchsorted(left_end, side="right")
            right_chunk = right._values[loc:]
            dates = concat_compat([left._values, right_chunk])
            # The can_fast_union check ensures that the result.freq
            #  should match self.freq
            assert isinstance(dates, type(self._data))
            # error: Item "ExtensionArray" of "ExtensionArray |
            #  ndarray[Any, Any]" has no attribute "_freq"
            assert dates._freq == self.freq  # type: ignore[union-attr]
            result = type(self)._simple_new(dates)
            return result
        else:
            return left

    def _union(self, other, sort):
        # We are called by `union`, which is responsible for this validation
        assert isinstance(other, type(self))
        assert self.dtype == other.dtype

        if self._can_range_setop(other):
            return self._range_union(other, sort=sort)

        if self._can_fast_union(other):
            result = self._fast_union(other, sort=sort)
            # in the case with sort=None, the _can_fast_union check ensures
            #  that result.freq == self.freq
            return result
        else:
            return super()._union(other, sort)._with_freq("infer")

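For Tick-based frequencies the net effect is that union and intersection stay on the i8 `RangeIndex` fast path and keep the frequency, while inputs without a freq fall back to the base-class set logic followed by freq inference. A minimal demonstration (hypothetical ranges):

import pandas as pd

left = pd.date_range("2021-01-01", periods=5, freq="D")
right = pd.date_range("2021-01-04", periods=5, freq="D")

# Day is a Tick, so _can_range_setop is True and freq survives both ops.
print(left.union(right).freq)         # <Day>
print(left.intersection(right).freq)  # <Day>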
    # --------------------------------------------------------------------
    # Join Methods

    def _get_join_freq(self, other):
        """
        Get the freq to attach to the result of a join operation.
        """
        freq = None
        if self._can_fast_union(other):
            freq = self.freq
        return freq

    def _wrap_joined_index(
        self, joined, other, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp]
    ):
        assert other.dtype == self.dtype, (other.dtype, self.dtype)
        result = super()._wrap_joined_index(joined, other, lidx, ridx)
        result._data._freq = self._get_join_freq(other)
        return result

    def _get_engine_target(self) -> np.ndarray:
        # engine methods and libjoin methods need dt64/td64 values cast to i8
        return self._data._ndarray.view("i8")

    def _from_join_target(self, result: np.ndarray):
        # view e.g. i8 back to M8[ns]
        result = result.view(self._data._ndarray.dtype)
        return self._data._from_backing_data(result)

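`_get_join_freq` reuses the `_can_fast_union` criterion, so a join between two indexes that overlap or adjoin on the same freq should keep that freq on the result. Sketched through the public `join` (hypothetical ranges; worth verifying against your pandas version):

import pandas as pd

left = pd.date_range("2021-01-01", periods=4, freq="D")
right = pd.date_range("2021-01-03", periods=4, freq="D")

joined = left.join(right, how="outer")
print(joined.freq)  # <Day>: _wrap_joined_index re-attached the shared freq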
    # --------------------------------------------------------------------
    # List-like Methods

    def _get_delete_freq(self, loc: int | slice | Sequence[int]):
        """
        Find the `freq` for self.delete(loc).
        """
        freq = None
        if self.freq is not None:
            if is_integer(loc):
                if loc in (0, -len(self), -1, len(self) - 1):
                    freq = self.freq
            else:
                if is_list_like(loc):
                    # error: Incompatible types in assignment (expression has
                    # type "Union[slice, ndarray]", variable has type
                    # "Union[int, slice, Sequence[int]]")
                    loc = lib.maybe_indices_to_slice(  # type: ignore[assignment]
                        np.asarray(loc, dtype=np.intp), len(self)
                    )
                if isinstance(loc, slice) and loc.step in (1, None):
                    if loc.start in (0, None) or loc.stop in (len(self), None):
                        freq = self.freq
        return freq

    def _get_insert_freq(self, loc: int, item):
        """
        Find the `freq` for self.insert(loc, item).
        """
        value = self._data._validate_scalar(item)
        item = self._data._box_func(value)

        freq = None
        if self.freq is not None:
            # freq can be preserved on edge cases
            if self.size:
                if item is NaT:
                    pass
                elif loc in (0, -len(self)) and item + self.freq == self[0]:
                    freq = self.freq
                elif (loc == len(self)) and item - self.freq == self[-1]:
                    freq = self.freq
            else:
                # Adding a single item to an empty index may preserve freq
                if isinstance(self.freq, Tick):
                    # all TimedeltaIndex cases go through here; is_on_offset
                    #  would raise TypeError
                    freq = self.freq
                elif self.freq.is_on_offset(item):
                    freq = self.freq
        return freq

    @doc(NDArrayBackedExtensionIndex.delete)
    def delete(self, loc) -> Self:
        result = super().delete(loc)
        result._data._freq = self._get_delete_freq(loc)
        return result

    @doc(NDArrayBackedExtensionIndex.insert)
    def insert(self, loc: int, item):
        result = super().insert(loc, item)
        if isinstance(result, type(self)):
            # i.e. parent class method did not cast
            result._data._freq = self._get_insert_freq(loc, item)
        return result

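The freq bookkeeping above is observable directly: deleting an endpoint or inserting the next point in the pattern keeps the freq, while anything that tears a hole drops it. For instance (hypothetical index):

import pandas as pd

idx = pd.date_range("2021-01-01", periods=4, freq="D")

print(idx.delete(0).freq)   # <Day>: endpoint removal keeps even spacing
print(idx.delete(1).freq)   # None: a gap in the middle invalidates freq
print(idx.insert(4, pd.Timestamp("2021-01-05")).freq)  # <Day>: extends the pattern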
    # --------------------------------------------------------------------
    # NDArray-Like Methods

    @Appender(_index_shared_docs["take"] % _index_doc_kwargs)
    def take(
        self,
        indices,
        axis: Axis = 0,
        allow_fill: bool = True,
        fill_value=None,
        **kwargs,
    ) -> Self:
        nv.validate_take((), kwargs)
        indices = np.asarray(indices, dtype=np.intp)

        result = NDArrayBackedExtensionIndex.take(
            self, indices, axis, allow_fill, fill_value, **kwargs
        )

        maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
        if isinstance(maybe_slice, slice):
            freq = self._data._get_getitem_freq(maybe_slice)
            result._data._freq = freq
        return result
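`take` applies the same idea: if the requested indices collapse to a contiguous slice, `_get_getitem_freq` can keep the freq; otherwise it is dropped. A small sketch (hypothetical index):

import pandas as pd

idx = pd.date_range("2021-01-01", periods=5, freq="D")

print(idx.take([1, 2, 3]).freq)  # <Day>: indices form a step-1 slice
print(idx.take([4, 0, 2]).freq)  # None: arbitrary order, freq cannot be kept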