Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py +9 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/datetimelike_accumulations.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_accumulations.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_reductions.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/putmask.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/quantile.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/replace.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/take.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/transforms.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py +67 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py +90 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/masked_reductions.py +197 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py +149 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py +226 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/replace.py +152 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/take.py +594 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py +50 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py +7 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py +66 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py +473 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py +174 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py +207 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/api.py +85 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/astype.py +301 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/base.py +583 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/cast.py +1973 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/common.py +1748 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/concat.py +348 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py +2348 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/generic.py +147 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/inference.py +437 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/missing.py +810 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py
ADDED
@@ -0,0 +1,9 @@
+"""
+core.array_algos is for algorithms that operate on ndarray and ExtensionArray.
+These should:
+
+- Assume that any Index, Series, or DataFrame objects have already been unwrapped.
+- Assume that any list arguments have already been cast to ndarray/EA.
+- Not depend on Index, Series, or DataFrame, nor import any of these.
+- May dispatch to ExtensionArray methods, but should not import from core.arrays.
+"""
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (612 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/datetimelike_accumulations.cpython-310.pyc
ADDED
Binary file (1.99 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_accumulations.cpython-310.pyc
ADDED
Binary file (2.37 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_reductions.cpython-310.pyc
ADDED
Binary file (3.98 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/putmask.cpython-310.pyc
ADDED
Binary file (3.44 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/quantile.cpython-310.pyc
ADDED
Binary file (4.63 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/replace.cpython-310.pyc
ADDED
Binary file (3.83 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/take.cpython-310.pyc
ADDED
Binary file (13.4 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/transforms.cpython-310.pyc
ADDED
Binary file (1.02 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py
ADDED
@@ -0,0 +1,67 @@
+"""
+datetimelike_accumulations.py is for accumulations of datetimelike extension arrays.
+"""
+
+from __future__ import annotations
+
+from typing import Callable
+
+import numpy as np
+
+from pandas._libs import iNaT
+
+from pandas.core.dtypes.missing import isna
+
+
+def _cum_func(
+    func: Callable,
+    values: np.ndarray,
+    *,
+    skipna: bool = True,
+):
+    """
+    Accumulations for 1D datetimelike arrays.
+
+    Parameters
+    ----------
+    func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate
+    values : np.ndarray
+        Numpy array with the values (can be of any dtype that supports the
+        operation). Values are modified in place.
+    skipna : bool, default True
+        Whether to skip NA.
+    """
+    try:
+        fill_value = {
+            np.maximum.accumulate: np.iinfo(np.int64).min,
+            np.cumsum: 0,
+            np.minimum.accumulate: np.iinfo(np.int64).max,
+        }[func]
+    except KeyError:
+        raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray")
+
+    mask = isna(values)
+    y = values.view("i8")
+    y[mask] = fill_value
+
+    if not skipna:
+        mask = np.maximum.accumulate(mask)
+
+    result = func(y)
+    result[mask] = iNaT
+
+    if values.dtype.kind in "mM":
+        return result.view(values.dtype.base)
+    return result
+
+
+def cumsum(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:
+    return _cum_func(np.cumsum, values, skipna=skipna)
+
+
+def cummin(values: np.ndarray, *, skipna: bool = True):
+    return _cum_func(np.minimum.accumulate, values, skipna=skipna)
+
+
+def cummax(values: np.ndarray, *, skipna: bool = True):
+    return _cum_func(np.maximum.accumulate, values, skipna=skipna)
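
Usage note: a minimal sketch of the module above, assuming the vendored pandas exposes the private module path pandas.core.array_algos.datetimelike_accumulations (these are internal helpers, not a stable API), with illustrative data:

import numpy as np
from pandas.core.array_algos.datetimelike_accumulations import cummax

# NaT positions are filled with int64 min before accumulating, then restored
# to NaT in the result; with skipna=True they do not poison later entries.
arr = np.array(["2021-01-02", "NaT", "2021-01-01"], dtype="datetime64[ns]")
print(cummax(arr))  # ['2021-01-02', 'NaT', '2021-01-02']
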
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py
ADDED
@@ -0,0 +1,90 @@
+"""
+masked_accumulations.py is for accumulation algorithms using a mask-based approach
+for missing values.
+"""
+
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+)
+
+import numpy as np
+
+if TYPE_CHECKING:
+    from pandas._typing import npt
+
+
+def _cum_func(
+    func: Callable,
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+):
+    """
+    Accumulations for 1D masked array.
+
+    We will modify values in place to replace NAs with the appropriate fill value.
+
+    Parameters
+    ----------
+    func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate
+    values : np.ndarray
+        Numpy array with the values (can be of any dtype that supports the
+        operation).
+    mask : np.ndarray
+        Boolean numpy array (True values indicate missing values).
+    skipna : bool, default True
+        Whether to skip NA.
+    """
+    dtype_info: np.iinfo | np.finfo
+    if values.dtype.kind == "f":
+        dtype_info = np.finfo(values.dtype.type)
+    elif values.dtype.kind in "iu":
+        dtype_info = np.iinfo(values.dtype.type)
+    elif values.dtype.kind == "b":
+        # Max value of bool is 1, but since we are setting into a boolean
+        # array, 255 is fine as well. Min value has to be 0 when setting
+        # into the boolean array.
+        dtype_info = np.iinfo(np.uint8)
+    else:
+        raise NotImplementedError(
+            f"No masked accumulation defined for dtype {values.dtype.type}"
+        )
+    try:
+        fill_value = {
+            np.cumprod: 1,
+            np.maximum.accumulate: dtype_info.min,
+            np.cumsum: 0,
+            np.minimum.accumulate: dtype_info.max,
+        }[func]
+    except KeyError:
+        raise NotImplementedError(
+            f"No accumulation for {func} implemented on BaseMaskedArray"
+        )
+
+    values[mask] = fill_value
+
+    if not skipna:
+        mask = np.maximum.accumulate(mask)
+
+    values = func(values)
+    return values, mask
+
+
+def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+    return _cum_func(np.cumsum, values, mask, skipna=skipna)
+
+
+def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+    return _cum_func(np.cumprod, values, mask, skipna=skipna)
+
+
+def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+    return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna)
+
+
+def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+    return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna)
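
Usage note: a minimal sketch of the masked accumulation helpers, under the same assumption about the private import path; note that values is mutated in place, as the docstring warns:

import numpy as np
from pandas.core.array_algos.masked_accumulations import cumsum

values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, True, False, False])  # position 1 is missing

# The masked slot is filled with 0 (the cumsum identity) before accumulating.
result, result_mask = cumsum(values, mask)
print(result)       # [1 1 4 8]
print(result_mask)  # [False  True False False]
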
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/masked_reductions.py
ADDED
@@ -0,0 +1,197 @@
+"""
+masked_reductions.py is for reduction algorithms using a mask-based approach
+for missing values.
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import missing as libmissing
+
+from pandas.core.nanops import check_below_min_count
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        AxisInt,
+        npt,
+    )
+
+
+def _reductions(
+    func: Callable,
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    min_count: int = 0,
+    axis: AxisInt | None = None,
+    **kwargs,
+):
+    """
+    Sum, mean or product for 1D masked array.
+
+    Parameters
+    ----------
+    func : np.sum or np.prod
+    values : np.ndarray
+        Numpy array with the values (can be of any dtype that supports the
+        operation).
+    mask : np.ndarray[bool]
+        Boolean numpy array (True values indicate missing values).
+    skipna : bool, default True
+        Whether to skip NA.
+    min_count : int, default 0
+        The required number of valid values to perform the operation. If fewer than
+        ``min_count`` non-NA values are present the result will be NA.
+    axis : int, optional, default None
+    """
+    if not skipna:
+        if mask.any() or check_below_min_count(values.shape, None, min_count):
+            return libmissing.NA
+        else:
+            return func(values, axis=axis, **kwargs)
+    else:
+        if check_below_min_count(values.shape, mask, min_count) and (
+            axis is None or values.ndim == 1
+        ):
+            return libmissing.NA
+
+        return func(values, where=~mask, axis=axis, **kwargs)
+
+
+def sum(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    min_count: int = 0,
+    axis: AxisInt | None = None,
+):
+    return _reductions(
+        np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
+    )
+
+
+def prod(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    min_count: int = 0,
+    axis: AxisInt | None = None,
+):
+    return _reductions(
+        np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
+    )
+
+
+def _minmax(
+    func: Callable,
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+):
+    """
+    Reduction for 1D masked array.
+
+    Parameters
+    ----------
+    func : np.min or np.max
+    values : np.ndarray
+        Numpy array with the values (can be of any dtype that supports the
+        operation).
+    mask : np.ndarray[bool]
+        Boolean numpy array (True values indicate missing values).
+    skipna : bool, default True
+        Whether to skip NA.
+    axis : int, optional, default None
+    """
+    if not skipna:
+        if mask.any() or not values.size:
+            # min/max with empty array raise in numpy, pandas returns NA
+            return libmissing.NA
+        else:
+            return func(values, axis=axis)
+    else:
+        subset = values[~mask]
+        if subset.size:
+            return func(subset, axis=axis)
+        else:
+            # min/max with empty array raise in numpy, pandas returns NA
+            return libmissing.NA
+
+
+def min(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+):
+    return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis)
+
+
+def max(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+):
+    return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis)
+
+
+def mean(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+):
+    if not values.size or mask.all():
+        return libmissing.NA
+    return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis)
+
+
+def var(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+    ddof: int = 1,
+):
+    if not values.size or mask.all():
+        return libmissing.NA
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", RuntimeWarning)
+        return _reductions(
+            np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
+        )
+
+
+def std(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+    ddof: int = 1,
+):
+    if not values.size or mask.all():
+        return libmissing.NA
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", RuntimeWarning)
+        return _reductions(
+            np.std, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
+        )
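
Usage note: a minimal sketch of the masked reductions, again assuming the private import path; min_count and skipna behave as the docstrings above describe:

import numpy as np
from pandas.core.array_algos import masked_reductions

values = np.array([1.0, 2.0, 3.0])
mask = np.array([False, False, True])  # the 3.0 is treated as missing

print(masked_reductions.sum(values, mask))                # 3.0 (masked slot skipped via where=~mask)
print(masked_reductions.sum(values, mask, min_count=3))   # <NA> (only 2 valid values)
print(masked_reductions.sum(values, mask, skipna=False))  # <NA> (any masked value -> NA)
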
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py
ADDED
@@ -0,0 +1,149 @@
+"""
+EA-compatible analogue to np.putmask
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+)
+
+import numpy as np
+
+from pandas._libs import lib
+
+from pandas.core.dtypes.cast import infer_dtype_from
+from pandas.core.dtypes.common import is_list_like
+
+from pandas.core.arrays import ExtensionArray
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        npt,
+    )
+
+    from pandas import MultiIndex
+
+
+def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:
+    """
+    ExtensionArray-compatible implementation of np.putmask. The main
+    difference is we do not handle repeating or truncating like numpy.
+
+    Parameters
+    ----------
+    values : np.ndarray or ExtensionArray
+    mask : np.ndarray[bool]
+        We assume extract_bool_array has already been called.
+    value : Any
+    """
+
+    if (
+        not isinstance(values, np.ndarray)
+        or (values.dtype == object and not lib.is_scalar(value))
+        # GH#43424: np.putmask raises TypeError if we cannot cast between types with
+        # rule = "safe", a stricter guarantee we may not have here
+        or (
+            isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)
+        )
+    ):
+        # GH#19266 using np.putmask gives unexpected results with listlike value
+        # along with object dtype
+        if is_list_like(value) and len(value) == len(values):
+            values[mask] = value[mask]
+        else:
+            values[mask] = value
+    else:
+        # GH#37833 np.putmask is more performant than __setitem__
+        np.putmask(values, mask, value)
+
+
+def putmask_without_repeat(
+    values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any
+) -> None:
+    """
+    np.putmask will truncate or repeat if `new` is a listlike with
+    len(new) != len(values). We require an exact match.
+
+    Parameters
+    ----------
+    values : np.ndarray
+    mask : np.ndarray[bool]
+    new : Any
+    """
+    if getattr(new, "ndim", 0) >= 1:
+        new = new.astype(values.dtype, copy=False)
+
+    # TODO: this prob needs some better checking for 2D cases
+    nlocs = mask.sum()
+    if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
+        shape = np.shape(new)
+        # np.shape compat for if setitem_datetimelike_compat
+        # changed arraylike to list e.g. test_where_dt64_2d
+        if nlocs == shape[-1]:
+            # GH#30567
+            # If length of ``new`` is less than the length of ``values``,
+            # `np.putmask` would first repeat the ``new`` array and then
+            # assign the masked values, hence producing an incorrect result.
+            # `np.place` on the other hand uses the ``new`` values as they are,
+            # to place in the masked locations of ``values``
+            np.place(values, mask, new)
+            # i.e. values[mask] = new
+        elif mask.shape[-1] == shape[-1] or shape[-1] == 1:
+            np.putmask(values, mask, new)
+        else:
+            raise ValueError("cannot assign mismatch length to masked array")
+    else:
+        np.putmask(values, mask, new)
+
+
+def validate_putmask(
+    values: ArrayLike | MultiIndex, mask: np.ndarray
+) -> tuple[npt.NDArray[np.bool_], bool]:
+    """
+    Validate mask and check if this putmask operation is a no-op.
+    """
+    mask = extract_bool_array(mask)
+    if mask.shape != values.shape:
+        raise ValueError("putmask: mask and data must be the same size")
+
+    noop = not mask.any()
+    return mask, noop
+
+
+def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]:
+    """
+    If we have a SparseArray or BooleanArray, convert it to ndarray[bool].
+    """
+    if isinstance(mask, ExtensionArray):
+        # We could have BooleanArray, Sparse[bool], ...
+        # Except for BooleanArray, this is equivalent to just
+        # np.asarray(mask, dtype=bool)
+        mask = mask.to_numpy(dtype=bool, na_value=False)
+
+    mask = np.asarray(mask, dtype=bool)
+    return mask
+
+
+def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):
+    """
+    Parameters
+    ----------
+    values : np.ndarray
+    num_set : int
+        For putmask, this is mask.sum()
+    other : Any
+    """
+    if values.dtype == object:
+        dtype, _ = infer_dtype_from(other)
+
+        if lib.is_np_dtype(dtype, "mM"):
+            # https://github.com/numpy/numpy/issues/12550
+            # timedelta64 will incorrectly cast to int
+            if not is_list_like(other):
+                other = [other] * num_set
+            else:
+                other = list(other)
+
+    return other
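
Usage note: the repeat behavior that putmask_without_repeat guards against can be demonstrated with plain NumPy (no pandas internals required; the arrays are illustrative):

import numpy as np

values = np.array([0, 0, 0, 0])
mask = np.array([True, False, True, False])

a = values.copy()
np.putmask(a, mask, np.array([10, 20]))  # np.putmask indexes `new` by position in `a`, cycling it
print(a)  # [10  0 10  0]

b = values.copy()
np.place(b, mask, np.array([10, 20]))    # np.place consumes `new` in order, one value per True
print(b)  # [10  0 20  0]
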
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py
ADDED
@@ -0,0 +1,226 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.core.dtypes.missing import (
+    isna,
+    na_value_for_dtype,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        Scalar,
+        npt,
+    )
+
+
+def quantile_compat(
+    values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str
+) -> ArrayLike:
+    """
+    Compute the quantiles of the given values for each quantile in `qs`.
+
+    Parameters
+    ----------
+    values : np.ndarray or ExtensionArray
+    qs : np.ndarray[float64]
+    interpolation : str
+
+    Returns
+    -------
+    np.ndarray or ExtensionArray
+    """
+    if isinstance(values, np.ndarray):
+        fill_value = na_value_for_dtype(values.dtype, compat=False)
+        mask = isna(values)
+        return quantile_with_mask(values, mask, fill_value, qs, interpolation)
+    else:
+        return values._quantile(qs, interpolation)
+
+
+def quantile_with_mask(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    fill_value,
+    qs: npt.NDArray[np.float64],
+    interpolation: str,
+) -> np.ndarray:
+    """
+    Compute the quantiles of the given values for each quantile in `qs`.
+
+    Parameters
+    ----------
+    values : np.ndarray
+        For ExtensionArray, this is _values_for_factorize()[0]
+    mask : np.ndarray[bool]
+        mask = isna(values)
+        For ExtensionArray, this is computed before calling _values_for_factorize
+    fill_value : Scalar
+        The value to fill NA entries with.
+        For ExtensionArray, this is _values_for_factorize()[1]
+    qs : np.ndarray[float64]
+    interpolation : str
+        Type of interpolation
+
+    Returns
+    -------
+    np.ndarray
+
+    Notes
+    -----
+    Assumes values is already 2D. For ExtensionArray this means np.atleast_2d
+    has been called on _values_for_factorize()[0]
+
+    Quantile is computed along axis=1.
+    """
+    assert values.shape == mask.shape
+    if values.ndim == 1:
+        # unsqueeze, operate, re-squeeze
+        values = np.atleast_2d(values)
+        mask = np.atleast_2d(mask)
+        res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation)
+        return res_values[0]
+
+    assert values.ndim == 2
+
+    is_empty = values.shape[1] == 0
+
+    if is_empty:
+        # create the array of na_values
+        # 2d len(values) * len(qs)
+        flat = np.array([fill_value] * len(qs))
+        result = np.repeat(flat, len(values)).reshape(len(values), len(qs))
+    else:
+        result = _nanpercentile(
+            values,
+            qs * 100.0,
+            na_value=fill_value,
+            mask=mask,
+            interpolation=interpolation,
+        )
+
+        result = np.asarray(result)
+        result = result.T
+
+    return result
+
+
+def _nanpercentile_1d(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    qs: npt.NDArray[np.float64],
+    na_value: Scalar,
+    interpolation: str,
+) -> Scalar | np.ndarray:
+    """
+    Wrapper for np.percentile that skips missing values, specialized to
+    1-dimensional case.
+
+    Parameters
+    ----------
+    values : array over which to find quantiles
+    mask : ndarray[bool]
+        locations in values that should be considered missing
+    qs : np.ndarray[float64] of quantile indices to find
+    na_value : scalar
+        value to return for empty or all-null values
+    interpolation : str
+
+    Returns
+    -------
+    quantiles : scalar or array
+    """
+    # mask is Union[ExtensionArray, ndarray]
+    values = values[~mask]
+
+    if len(values) == 0:
+        # Can't pass dtype=values.dtype here bc we might have na_value=np.nan
+        # with values.dtype=int64 see test_quantile_empty
+        # equiv: 'np.array([na_value] * len(qs))' but much faster
+        return np.full(len(qs), na_value)
+
+    return np.percentile(
+        values,
+        qs,
+        # error: No overload variant of "percentile" matches argument
+        # types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]"
+        # , "Dict[str, str]"  [call-overload]
+        method=interpolation,  # type: ignore[call-overload]
+    )
+
+
+def _nanpercentile(
+    values: np.ndarray,
+    qs: npt.NDArray[np.float64],
+    *,
+    na_value,
+    mask: npt.NDArray[np.bool_],
+    interpolation: str,
+):
+    """
+    Wrapper for np.percentile that skips missing values.
+
+    Parameters
+    ----------
+    values : np.ndarray[ndim=2] over which to find quantiles
+    qs : np.ndarray[float64] of quantile indices to find
+    na_value : scalar
+        value to return for empty or all-null values
+    mask : np.ndarray[bool]
+        locations in values that should be considered missing
+    interpolation : str
+
+    Returns
+    -------
+    quantiles : scalar or array
+    """
+
+    if values.dtype.kind in "mM":
+        # need to cast to integer to avoid rounding errors in numpy
+        result = _nanpercentile(
+            values.view("i8"),
+            qs=qs,
+            na_value=na_value.view("i8"),
+            mask=mask,
+            interpolation=interpolation,
+        )
+
+        # Note: we have to do `astype` and not view because in general we
+        # have float result at this point, not i8
+        return result.astype(values.dtype)
+
+    if mask.any():
+        # Caller is responsible for ensuring mask shape match
+        assert mask.shape == values.shape
+        result = [
+            _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation)
+            for (val, m) in zip(list(values), list(mask))
+        ]
+        if values.dtype.kind == "f":
+            # preserve itemsize
+            result = np.asarray(result, dtype=values.dtype).T
+        else:
+            result = np.asarray(result).T
+            if (
+                result.dtype != values.dtype
+                and not mask.all()
+                and (result == result.astype(values.dtype, copy=False)).all()
+            ):
+                # mask.all() will never get cast back to int
+                # e.g. values is integer dtype and result is floating dtype,
+                # only cast back to integer dtype if result values are all-integer.
+                result = result.astype(values.dtype, copy=False)
+        return result
+    else:
+        return np.percentile(
+            values,
+            qs,
+            axis=1,
+            # error: No overload variant of "percentile" matches argument types
+            # "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]",
+            # "int", "Dict[str, str]"  [call-overload]
+            method=interpolation,  # type: ignore[call-overload]
+        )
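
Usage note: a minimal sketch of quantile_with_mask, under the same private-API assumption; values must already be 2D and quantiles are computed along axis=1, per the Notes above:

import numpy as np
from pandas.core.array_algos.quantile import quantile_with_mask

values = np.array([[1.0, np.nan, 3.0, 4.0]])  # already 2D, as the function assumes
mask = np.isnan(values)
qs = np.array([0.25, 0.5, 0.75])

# NaNs are dropped row-wise before np.percentile is applied.
print(quantile_with_mask(values, mask, np.nan, qs, interpolation="linear"))
# [[2.  3.  3.5]]
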
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/replace.py
ADDED
@@ -0,0 +1,152 @@
+"""
+Methods used by Block.replace and related methods.
+"""
+from __future__ import annotations
+
+import operator
+import re
+from re import Pattern
+from typing import (
+    TYPE_CHECKING,
+    Any,
+)
+
+import numpy as np
+
+from pandas.core.dtypes.common import (
+    is_bool,
+    is_re,
+    is_re_compilable,
+)
+from pandas.core.dtypes.missing import isna
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        Scalar,
+        npt,
+    )
+
+
+def should_use_regex(regex: bool, to_replace: Any) -> bool:
+    """
+    Decide whether to treat `to_replace` as a regular expression.
+    """
+    if is_re(to_replace):
+        regex = True
+
+    regex = regex and is_re_compilable(to_replace)
+
+    # Don't use regex if the pattern is empty.
+    regex = regex and re.compile(to_replace).pattern != ""
+    return regex
+
+
+def compare_or_regex_search(
+    a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]
+) -> ArrayLike:
+    """
+    Compare two array-like inputs of the same shape or two scalar values
+
+    Calls operator.eq or re.search, depending on regex argument. If regex is
+    True, perform an element-wise regex matching.
+
+    Parameters
+    ----------
+    a : array-like
+    b : scalar or regex pattern
+    regex : bool
+    mask : np.ndarray[bool]
+
+    Returns
+    -------
+    mask : array-like of bool
+    """
+    if isna(b):
+        return ~mask
+
+    def _check_comparison_types(
+        result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
+    ):
+        """
+        Raises an error if the two arrays (a,b) cannot be compared.
+        Otherwise, returns the comparison result as expected.
+        """
+        if is_bool(result) and isinstance(a, np.ndarray):
+            type_names = [type(a).__name__, type(b).__name__]
+
+            type_names[0] = f"ndarray(dtype={a.dtype})"
+
+            raise TypeError(
+                f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
+            )
+
+    if not regex or not should_use_regex(regex, b):
+        # TODO: should use missing.mask_missing?
+        op = lambda x: operator.eq(x, b)
+    else:
+        op = np.vectorize(
+            lambda x: bool(re.search(b, x))
+            if isinstance(x, str) and isinstance(b, (str, Pattern))
+            else False
+        )
+
+    # GH#32621 use mask to avoid comparing to NAs
+    if isinstance(a, np.ndarray):
+        a = a[mask]
+
+    result = op(a)
+
+    if isinstance(result, np.ndarray) and mask is not None:
+        # The shape of the mask can differ to that of the result
+        # since we may compare only a subset of a's or b's elements
+        tmp = np.zeros(mask.shape, dtype=np.bool_)
+        np.place(tmp, mask, result)
+        result = tmp
+
+    _check_comparison_types(result, a, b)
+    return result
+
+
+def replace_regex(
+    values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None
+) -> None:
+    """
+    Parameters
+    ----------
+    values : ArrayLike
+        Object dtype.
+    rx : re.Pattern
+    value : Any
+    mask : np.ndarray[bool], optional
+
+    Notes
+    -----
+    Alters values in-place.
+    """
+
+    # deal with replacing values with objects (strings) that match but
+    # whose replacement is not a string (numeric, nan, object)
+    if isna(value) or not isinstance(value, str):
+
+        def re_replacer(s):
+            if is_re(rx) and isinstance(s, str):
+                return value if rx.search(s) is not None else s
+            else:
+                return s
+
+    else:
+        # value is guaranteed to be a string here, s can be either a string
+        # or null; if it's null it gets returned
+        def re_replacer(s):
+            if is_re(rx) and isinstance(s, str):
+                return rx.sub(value, s)
+            else:
+                return s
+
+    f = np.vectorize(re_replacer, otypes=[np.object_])
+
+    if mask is None:
+        values[:] = f(values)
+    else:
+        values[mask] = f(values[mask])
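
Usage note: a minimal sketch of replace_regex, under the same private-API assumption; values must be object dtype and is altered in place:

import re

import numpy as np

from pandas.core.array_algos.replace import replace_regex

values = np.array(["foo-1", "bar-2", None], dtype=object)
rx = re.compile(r"-\d+")

# value is a string, so each matching element is rewritten with rx.sub;
# non-strings (here, None) pass through unchanged.
replace_regex(values, rx, "", mask=None)
print(values)  # ['foo' 'bar' None]
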
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/take.py
ADDED
@@ -0,0 +1,594 @@
+from __future__ import annotations
+
+import functools
+from typing import (
+    TYPE_CHECKING,
+    cast,
+    overload,
+)
+
+import numpy as np
+
+from pandas._libs import (
+    algos as libalgos,
+    lib,
+)
+
+from pandas.core.dtypes.cast import maybe_promote
+from pandas.core.dtypes.common import (
+    ensure_platform_int,
+    is_1d_only_ea_dtype,
+)
+from pandas.core.dtypes.missing import na_value_for_dtype
+
+from pandas.core.construction import ensure_wrapped_if_datetimelike
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        AxisInt,
+        npt,
+    )
+
+    from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+    from pandas.core.arrays.base import ExtensionArray
+
+
+@overload
+def take_nd(
+    arr: np.ndarray,
+    indexer,
+    axis: AxisInt = ...,
+    fill_value=...,
+    allow_fill: bool = ...,
+) -> np.ndarray:
+    ...
+
+
+@overload
+def take_nd(
+    arr: ExtensionArray,
+    indexer,
+    axis: AxisInt = ...,
+    fill_value=...,
+    allow_fill: bool = ...,
+) -> ArrayLike:
+    ...
+
+
+def take_nd(
+    arr: ArrayLike,
+    indexer,
+    axis: AxisInt = 0,
+    fill_value=lib.no_default,
+    allow_fill: bool = True,
+) -> ArrayLike:
+    """
+    Specialized Cython take which sets NaN values in one pass
+
+    This dispatches to ``take`` defined on ExtensionArrays.
+
+    Note: this function assumes that the indexer is a valid(ated) indexer with
+    no out of bound indices.
+
+    Parameters
+    ----------
+    arr : np.ndarray or ExtensionArray
+        Input array.
+    indexer : ndarray
+        1-D array of indices to take, subarrays corresponding to -1 value
+        indices are filled with fill_value
+    axis : int, default 0
+        Axis to take from
+    fill_value : any, default np.nan
+        Fill value to replace -1 values with
+    allow_fill : bool, default True
+        If False, indexer is assumed to contain no -1 values so no filling
+        will be done. This short-circuits computation of a mask. Result is
+        undefined if allow_fill == False and -1 is present in indexer.
+
+    Returns
+    -------
+    subarray : np.ndarray or ExtensionArray
+        May be the same type as the input, or cast to an ndarray.
+    """
+    if fill_value is lib.no_default:
+        fill_value = na_value_for_dtype(arr.dtype, compat=False)
+    elif lib.is_np_dtype(arr.dtype, "mM"):
+        dtype, fill_value = maybe_promote(arr.dtype, fill_value)
+        if arr.dtype != dtype:
+            # EA.take is strict about returning a new object of the same type
+            # so for that case cast upfront
+            arr = arr.astype(dtype)
+
+    if not isinstance(arr, np.ndarray):
+        # i.e. ExtensionArray,
+        # includes for EA to catch DatetimeArray, TimedeltaArray
+        if not is_1d_only_ea_dtype(arr.dtype):
+            # i.e. DatetimeArray, TimedeltaArray
+            arr = cast("NDArrayBackedExtensionArray", arr)
+            return arr.take(
+                indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis
+            )
+
+        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
+
+    arr = np.asarray(arr)
+    return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
+
+
+def _take_nd_ndarray(
+    arr: np.ndarray,
+    indexer: npt.NDArray[np.intp] | None,
+    axis: AxisInt,
+    fill_value,
+    allow_fill: bool,
+) -> np.ndarray:
+    if indexer is None:
+        indexer = np.arange(arr.shape[axis], dtype=np.intp)
+        dtype, fill_value = arr.dtype, arr.dtype.type()
+    else:
+        indexer = ensure_platform_int(indexer)
+
+    dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
+        arr, indexer, fill_value, allow_fill
+    )
+
+    flip_order = False
+    if arr.ndim == 2 and arr.flags.f_contiguous:
+        flip_order = True
+
+    if flip_order:
+        arr = arr.T
+        axis = arr.ndim - axis - 1
+
+    # at this point, it's guaranteed that dtype can hold both the arr values
+    # and the fill_value
+    out_shape_ = list(arr.shape)
+    out_shape_[axis] = len(indexer)
+    out_shape = tuple(out_shape_)
+    if arr.flags.f_contiguous and axis == arr.ndim - 1:
+        # minor tweak that can make an order-of-magnitude difference
+        # for dataframes initialized directly from 2-d ndarrays
+        # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
+        # f-contiguous transpose)
+        out = np.empty(out_shape, dtype=dtype, order="F")
+    else:
+        out = np.empty(out_shape, dtype=dtype)
+
+    func = _get_take_nd_function(
+        arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
+    )
+    func(arr, indexer, out, fill_value)
+
+    if flip_order:
+        out = out.T
+    return out
+
+
+def take_1d(
+    arr: ArrayLike,
+    indexer: npt.NDArray[np.intp],
+    fill_value=None,
+    allow_fill: bool = True,
+    mask: npt.NDArray[np.bool_] | None = None,
+) -> ArrayLike:
+    """
+    Specialized version for 1D arrays. Differences compared to `take_nd`:
+
+    - Assumes input array has already been converted to numpy array / EA
+    - Assumes indexer is already guaranteed to be intp dtype ndarray
+    - Only works for 1D arrays
+
+    To ensure the lowest possible overhead.
+
+    Note: similarly to `take_nd`, this function assumes that the indexer is
+    a valid(ated) indexer with no out of bound indices.
+
+    Parameters
+    ----------
+    arr : np.ndarray or ExtensionArray
+        Input array.
+    indexer : ndarray
+        1-D array of indices to take (validated indices, intp dtype).
+    fill_value : any, default np.nan
+        Fill value to replace -1 values with
+    allow_fill : bool, default True
+        If False, indexer is assumed to contain no -1 values so no filling
+        will be done. This short-circuits computation of a mask. Result is
+        undefined if allow_fill == False and -1 is present in indexer.
+    mask : np.ndarray, optional, default None
+        If `allow_fill` is True, and the mask (where indexer == -1) is already
+        known, it can be passed to avoid recomputation.
+    """
+    if not isinstance(arr, np.ndarray):
+        # ExtensionArray -> dispatch to their method
+        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
+
+    if not allow_fill:
+        return arr.take(indexer)
+
+    dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
+        arr, indexer, fill_value, True, mask
+    )
+
+    # at this point, it's guaranteed that dtype can hold both the arr values
+    # and the fill_value
+    out = np.empty(indexer.shape, dtype=dtype)
+
+    func = _get_take_nd_function(
+        arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info
+    )
+    func(arr, indexer, out, fill_value)
+
+    return out
+
+
+def take_2d_multi(
+    arr: np.ndarray,
+    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+    fill_value=np.nan,
+) -> np.ndarray:
+    """
+    Specialized Cython take which sets NaN values in one pass.
+    """
+    # This is only called from one place in DataFrame._reindex_multi,
+    # so we know indexer is well-behaved.
+    assert indexer is not None
+    assert indexer[0] is not None
+    assert indexer[1] is not None
+
+    row_idx, col_idx = indexer
+
+    row_idx = ensure_platform_int(row_idx)
+    col_idx = ensure_platform_int(col_idx)
+    indexer = row_idx, col_idx
+    mask_info = None
+
+    # check for promotion based on types only (do this first because
+    # it's faster than computing a mask)
+    dtype, fill_value = maybe_promote(arr.dtype, fill_value)
+    if dtype != arr.dtype:
+        # check if promotion is actually required based on indexer
+        row_mask = row_idx == -1
+        col_mask = col_idx == -1
+        row_needs = row_mask.any()
+        col_needs = col_mask.any()
+        mask_info = (row_mask, col_mask), (row_needs, col_needs)
+
+        if not (row_needs or col_needs):
+            # if not, then depromote, set fill_value to dummy
+            # (it won't be used but we don't want the cython code
+            # to crash when trying to cast it to dtype)
+            dtype, fill_value = arr.dtype, arr.dtype.type()
+
+    # at this point, it's guaranteed that dtype can hold both the arr values
+    # and the fill_value
+    out_shape = len(row_idx), len(col_idx)
+    out = np.empty(out_shape, dtype=dtype)
+
+    func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
+    if func is None and arr.dtype != out.dtype:
+        func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
+        if func is not None:
+            func = _convert_wrapper(func, out.dtype)
+
+    if func is not None:
+        func(arr, indexer, out=out, fill_value=fill_value)
+    else:
+        # test_reindex_multi
+        _take_2d_multi_object(
+            arr, indexer, out, fill_value=fill_value, mask_info=mask_info
+        )
+
+    return out
+
+
+@functools.lru_cache
+def _get_take_nd_function_cached(
+    ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt
+):
+    """
+    Part of _get_take_nd_function below that doesn't need `mask_info` and thus
+    can be cached (mask_info potentially contains a numpy ndarray which is not
+    hashable and thus cannot be used as argument for cached function).
+    """
+    tup = (arr_dtype.name, out_dtype.name)
+    if ndim == 1:
+        func = _take_1d_dict.get(tup, None)
+    elif ndim == 2:
+        if axis == 0:
+            func = _take_2d_axis0_dict.get(tup, None)
+        else:
+            func = _take_2d_axis1_dict.get(tup, None)
+    if func is not None:
+        return func
+
+    # We get here with string, uint, float16, and complex dtypes that could
+    # potentially be handled in algos_take_helper.
+    # Also a couple with (M8[ns], object) and (m8[ns], object)
+    tup = (out_dtype.name, out_dtype.name)
+    if ndim == 1:
+        func = _take_1d_dict.get(tup, None)
+    elif ndim == 2:
+        if axis == 0:
+            func = _take_2d_axis0_dict.get(tup, None)
+        else:
+            func = _take_2d_axis1_dict.get(tup, None)
+    if func is not None:
+        func = _convert_wrapper(func, out_dtype)
+        return func
+
+    return None
+
+
+def _get_take_nd_function(
+    ndim: int,
+    arr_dtype: np.dtype,
+    out_dtype: np.dtype,
+    axis: AxisInt = 0,
+    mask_info=None,
+):
+    """
+    Get the appropriate "take" implementation for the given dimension, axis
+    and dtypes.
+    """
+    func = None
+    if ndim <= 2:
+        # for this part we don't need `mask_info` -> use the cached algo lookup
+        func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)
+
+    if func is None:
+
+        def func(arr, indexer, out, fill_value=np.nan) -> None:
+            indexer = ensure_platform_int(indexer)
+            _take_nd_object(
+                arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
+            )
+
+    return func
+
+
+def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
+    def wrapper(
+        arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
+    ) -> None:
+        if arr_dtype is not None:
+            arr = arr.view(arr_dtype)
+        if out_dtype is not None:
+            out = out.view(out_dtype)
+        if fill_wrap is not None:
+            # FIXME: if we get here with dt64/td64 we need to be sure we have
+            # matching resos
+            if fill_value.dtype.kind == "m":
+                fill_value = fill_value.astype("m8[ns]")
+            else:
+                fill_value = fill_value.astype("M8[ns]")
+            fill_value = fill_wrap(fill_value)
+
+        f(arr, indexer, out, fill_value=fill_value)
+
+    return wrapper
+
+
+def _convert_wrapper(f, conv_dtype):
+    def wrapper(
+        arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
+    ) -> None:
+        if conv_dtype == object:
+            # GH#39755 avoid casting dt64/td64 to integers
+            arr = ensure_wrapped_if_datetimelike(arr)
+        arr = arr.astype(conv_dtype)
+        f(arr, indexer, out, fill_value=fill_value)
+
+    return wrapper
+
+
+_take_1d_dict = {
+    ("int8", "int8"): libalgos.take_1d_int8_int8,
+    ("int8", "int32"): libalgos.take_1d_int8_int32,
+    ("int8", "int64"): libalgos.take_1d_int8_int64,
+    ("int8", "float64"): libalgos.take_1d_int8_float64,
+    ("int16", "int16"): libalgos.take_1d_int16_int16,
+    ("int16", "int32"): libalgos.take_1d_int16_int32,
+    ("int16", "int64"): libalgos.take_1d_int16_int64,
+    ("int16", "float64"): libalgos.take_1d_int16_float64,
+    ("int32", "int32"): libalgos.take_1d_int32_int32,
+    ("int32", "int64"): libalgos.take_1d_int32_int64,
+    ("int32", "float64"): libalgos.take_1d_int32_float64,
+    ("int64", "int64"): libalgos.take_1d_int64_int64,
+    ("int64", "float64"): libalgos.take_1d_int64_float64,
+    ("float32", "float32"): libalgos.take_1d_float32_float32,
+    ("float32", "float64"): libalgos.take_1d_float32_float64,
+    ("float64", "float64"): libalgos.take_1d_float64_float64,
+    ("object", "object"): libalgos.take_1d_object_object,
+    ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8),
+    ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None),
+    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
+        libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
+    ),
+    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
+        libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
+    ),
+}
+
+_take_2d_axis0_dict = {
+    ("int8", "int8"): libalgos.take_2d_axis0_int8_int8,
+    ("int8", "int32"): libalgos.take_2d_axis0_int8_int32,
+    ("int8", "int64"): libalgos.take_2d_axis0_int8_int64,
+    ("int8", "float64"): libalgos.take_2d_axis0_int8_float64,
+    ("int16", "int16"): libalgos.take_2d_axis0_int16_int16,
+    ("int16", "int32"): libalgos.take_2d_axis0_int16_int32,
+    ("int16", "int64"): libalgos.take_2d_axis0_int16_int64,
+    ("int16", "float64"): libalgos.take_2d_axis0_int16_float64,
+    ("int32", "int32"): libalgos.take_2d_axis0_int32_int32,
+    ("int32", "int64"): libalgos.take_2d_axis0_int32_int64,
+    ("int32", "float64"): libalgos.take_2d_axis0_int32_float64,
+    ("int64", "int64"): libalgos.take_2d_axis0_int64_int64,
+    ("int64", "float64"): libalgos.take_2d_axis0_int64_float64,
+    ("float32", "float32"): libalgos.take_2d_axis0_float32_float32,
+    ("float32", "float64"): libalgos.take_2d_axis0_float32_float64,
+    ("float64", "float64"): libalgos.take_2d_axis0_float64_float64,
+    ("object", "object"): libalgos.take_2d_axis0_object_object,
+    ("bool", "bool"): _view_wrapper(
+        libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8
+    ),
+    ("bool", "object"): _view_wrapper(
+        libalgos.take_2d_axis0_bool_object, np.uint8, None
+    ),
+    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
+        libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
+    ),
+    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
+        libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
+    ),
+}
+
+_take_2d_axis1_dict = {
+    ("int8", "int8"): libalgos.take_2d_axis1_int8_int8,
+    ("int8", "int32"): libalgos.take_2d_axis1_int8_int32,
+    ("int8", "int64"): libalgos.take_2d_axis1_int8_int64,
+    ("int8", "float64"): libalgos.take_2d_axis1_int8_float64,
+    ("int16", "int16"): libalgos.take_2d_axis1_int16_int16,
+    ("int16", "int32"): libalgos.take_2d_axis1_int16_int32,
+    ("int16", "int64"): libalgos.take_2d_axis1_int16_int64,
+    ("int16", "float64"): libalgos.take_2d_axis1_int16_float64,
+    ("int32", "int32"): libalgos.take_2d_axis1_int32_int32,
+    ("int32", "int64"): libalgos.take_2d_axis1_int32_int64,
+    ("int32", "float64"): libalgos.take_2d_axis1_int32_float64,
+    ("int64", "int64"): libalgos.take_2d_axis1_int64_int64,
+    ("int64", "float64"): libalgos.take_2d_axis1_int64_float64,
+    ("float32", "float32"): libalgos.take_2d_axis1_float32_float32,
+    ("float32", "float64"): libalgos.take_2d_axis1_float32_float64,
+    ("float64", "float64"): libalgos.take_2d_axis1_float64_float64,
+    ("object", "object"): libalgos.take_2d_axis1_object_object,
+    ("bool", "bool"): _view_wrapper(
+        libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8
|
467 |
+
),
|
468 |
+
("bool", "object"): _view_wrapper(
|
469 |
+
libalgos.take_2d_axis1_bool_object, np.uint8, None
|
470 |
+
),
|
471 |
+
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
|
472 |
+
libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
|
473 |
+
),
|
474 |
+
("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
|
475 |
+
libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
|
476 |
+
),
|
477 |
+
}
|
478 |
+
|
479 |
+
_take_2d_multi_dict = {
|
480 |
+
("int8", "int8"): libalgos.take_2d_multi_int8_int8,
|
481 |
+
("int8", "int32"): libalgos.take_2d_multi_int8_int32,
|
482 |
+
("int8", "int64"): libalgos.take_2d_multi_int8_int64,
|
483 |
+
("int8", "float64"): libalgos.take_2d_multi_int8_float64,
|
484 |
+
("int16", "int16"): libalgos.take_2d_multi_int16_int16,
|
485 |
+
("int16", "int32"): libalgos.take_2d_multi_int16_int32,
|
486 |
+
("int16", "int64"): libalgos.take_2d_multi_int16_int64,
|
487 |
+
("int16", "float64"): libalgos.take_2d_multi_int16_float64,
|
488 |
+
("int32", "int32"): libalgos.take_2d_multi_int32_int32,
|
489 |
+
("int32", "int64"): libalgos.take_2d_multi_int32_int64,
|
490 |
+
("int32", "float64"): libalgos.take_2d_multi_int32_float64,
|
491 |
+
("int64", "int64"): libalgos.take_2d_multi_int64_int64,
|
492 |
+
("int64", "float64"): libalgos.take_2d_multi_int64_float64,
|
493 |
+
("float32", "float32"): libalgos.take_2d_multi_float32_float32,
|
494 |
+
("float32", "float64"): libalgos.take_2d_multi_float32_float64,
|
495 |
+
("float64", "float64"): libalgos.take_2d_multi_float64_float64,
|
496 |
+
("object", "object"): libalgos.take_2d_multi_object_object,
|
497 |
+
("bool", "bool"): _view_wrapper(
|
498 |
+
libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8
|
499 |
+
),
|
500 |
+
("bool", "object"): _view_wrapper(
|
501 |
+
libalgos.take_2d_multi_bool_object, np.uint8, None
|
502 |
+
),
|
503 |
+
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
|
504 |
+
libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
|
505 |
+
),
|
506 |
+
("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
|
507 |
+
libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
|
508 |
+
),
|
509 |
+
}
|
510 |
+
|
511 |
+
|
512 |
+
def _take_nd_object(
|
513 |
+
arr: np.ndarray,
|
514 |
+
indexer: npt.NDArray[np.intp],
|
515 |
+
out: np.ndarray,
|
516 |
+
axis: AxisInt,
|
517 |
+
fill_value,
|
518 |
+
mask_info,
|
519 |
+
) -> None:
|
520 |
+
if mask_info is not None:
|
521 |
+
mask, needs_masking = mask_info
|
522 |
+
else:
|
523 |
+
mask = indexer == -1
|
524 |
+
needs_masking = mask.any()
|
525 |
+
if arr.dtype != out.dtype:
|
526 |
+
arr = arr.astype(out.dtype)
|
527 |
+
if arr.shape[axis] > 0:
|
528 |
+
arr.take(indexer, axis=axis, out=out)
|
529 |
+
if needs_masking:
|
530 |
+
outindexer = [slice(None)] * arr.ndim
|
531 |
+
outindexer[axis] = mask
|
532 |
+
out[tuple(outindexer)] = fill_value
|
533 |
+
|
534 |
+
|
535 |
+
def _take_2d_multi_object(
|
536 |
+
arr: np.ndarray,
|
537 |
+
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
|
538 |
+
out: np.ndarray,
|
539 |
+
fill_value,
|
540 |
+
mask_info,
|
541 |
+
) -> None:
|
542 |
+
# this is not ideal, performance-wise, but it's better than raising
|
543 |
+
# an exception (best to optimize in Cython to avoid getting here)
|
544 |
+
row_idx, col_idx = indexer # both np.intp
|
545 |
+
if mask_info is not None:
|
546 |
+
(row_mask, col_mask), (row_needs, col_needs) = mask_info
|
547 |
+
else:
|
548 |
+
row_mask = row_idx == -1
|
549 |
+
col_mask = col_idx == -1
|
550 |
+
row_needs = row_mask.any()
|
551 |
+
col_needs = col_mask.any()
|
552 |
+
if fill_value is not None:
|
553 |
+
if row_needs:
|
554 |
+
out[row_mask, :] = fill_value
|
555 |
+
if col_needs:
|
556 |
+
out[:, col_mask] = fill_value
|
557 |
+
for i, u_ in enumerate(row_idx):
|
558 |
+
if u_ != -1:
|
559 |
+
for j, v in enumerate(col_idx):
|
560 |
+
if v != -1:
|
561 |
+
out[i, j] = arr[u_, v]
|
562 |
+
|
563 |
+
|
564 |
+
def _take_preprocess_indexer_and_fill_value(
|
565 |
+
arr: np.ndarray,
|
566 |
+
indexer: npt.NDArray[np.intp],
|
567 |
+
fill_value,
|
568 |
+
allow_fill: bool,
|
569 |
+
mask: npt.NDArray[np.bool_] | None = None,
|
570 |
+
):
|
571 |
+
mask_info: tuple[np.ndarray | None, bool] | None = None
|
572 |
+
|
573 |
+
if not allow_fill:
|
574 |
+
dtype, fill_value = arr.dtype, arr.dtype.type()
|
575 |
+
mask_info = None, False
|
576 |
+
else:
|
577 |
+
# check for promotion based on types only (do this first because
|
578 |
+
# it's faster than computing a mask)
|
579 |
+
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
|
580 |
+
if dtype != arr.dtype:
|
581 |
+
# check if promotion is actually required based on indexer
|
582 |
+
if mask is not None:
|
583 |
+
needs_masking = True
|
584 |
+
else:
|
585 |
+
mask = indexer == -1
|
586 |
+
needs_masking = bool(mask.any())
|
587 |
+
mask_info = mask, needs_masking
|
588 |
+
if not needs_masking:
|
589 |
+
# if not, then depromote, set fill_value to dummy
|
590 |
+
# (it won't be used but we don't want the cython code
|
591 |
+
# to crash when trying to cast it to dtype)
|
592 |
+
dtype, fill_value = arr.dtype, arr.dtype.type()
|
593 |
+
|
594 |
+
return dtype, fill_value, mask_info
|
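The machinery above only promotes the dtype when a `-1` actually appears in the indexer and `allow_fill` is set. A minimal sketch (not part of the vendored file) exercising that behaviour through the public `pandas.api.extensions.take` wrapper:

# Sketch: fill-value promotion as implemented by the helpers above.
import numpy as np
import pandas as pd

arr = np.array([10, 20, 30], dtype=np.int64)

# -1 marks a missing position; int64 is promoted to float64 so the
# NaN fill value is representable.
result = pd.api.extensions.take(arr, [0, 2, -1], allow_fill=True, fill_value=np.nan)
print(result)        # [10. 30. nan]
print(result.dtype)  # float64

# Without allow_fill, -1 is ordinary positional indexing and no
# promotion happens, mirroring the `not allow_fill` branch above.
print(pd.api.extensions.take(arr, [0, 2, -1]))  # [10 30 30]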
llmeval-env/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py
ADDED
@@ -0,0 +1,50 @@
"""
transforms.py is for shape-preserving functions.
"""

from __future__ import annotations

from typing import TYPE_CHECKING

import numpy as np

if TYPE_CHECKING:
    from pandas._typing import (
        AxisInt,
        Scalar,
    )


def shift(
    values: np.ndarray, periods: int, axis: AxisInt, fill_value: Scalar
) -> np.ndarray:
    new_values = values

    if periods == 0 or values.size == 0:
        return new_values.copy()

    # make sure array sent to np.roll is c_contiguous
    f_ordered = values.flags.f_contiguous
    if f_ordered:
        new_values = new_values.T
        axis = new_values.ndim - axis - 1

    if new_values.size:
        new_values = np.roll(
            new_values,
            np.intp(periods),
            axis=axis,
        )

    axis_indexer = [slice(None)] * values.ndim
    if periods > 0:
        axis_indexer[axis] = slice(None, periods)
    else:
        axis_indexer[axis] = slice(periods, None)
    new_values[tuple(axis_indexer)] = fill_value

    # restore original order
    if f_ordered:
        new_values = new_values.T

    return new_values
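`shift` above is a roll-then-fill: `np.roll` wraps values around the end of the axis, and the wrapped-in slots are then overwritten with the fill value. A minimal NumPy-only sketch (not part of the vendored file) of the same idea for the 1-D case:

# Sketch: the roll-then-fill idea behind `shift`, 1-D only.
import numpy as np

def shift_1d(values, periods, fill_value):
    # np.roll wraps values around the end of the array ...
    out = np.roll(values.copy(), periods)
    # ... so the wrapped-in slots are overwritten with the fill value.
    if periods > 0:
        out[:periods] = fill_value
    elif periods < 0:
        out[periods:] = fill_value
    return out

print(shift_1d(np.array([1.0, 2.0, 3.0, 4.0]), 2, np.nan))   # [nan nan 1. 2.]
print(shift_1d(np.array([1.0, 2.0, 3.0, 4.0]), -1, np.nan))  # [2. 3. 4. nan]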
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py
ADDED
@@ -0,0 +1,7 @@
from pandas.core.arrays.arrow.accessors import (
    ListAccessor,
    StructAccessor,
)
from pandas.core.arrays.arrow.array import ArrowExtensionArray

__all__ = ["ArrowExtensionArray", "StructAccessor", "ListAccessor"]
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py
ADDED
@@ -0,0 +1,66 @@
from __future__ import annotations

import warnings

import numpy as np
import pyarrow

from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level


def fallback_performancewarning(version: str | None = None) -> None:
    """
    Raise a PerformanceWarning for falling back to ExtensionArray's
    non-pyarrow method
    """
    msg = "Falling back on a non-pyarrow code path which may decrease performance."
    if version is not None:
        msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning."
    warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())


def pyarrow_array_to_numpy_and_mask(
    arr, dtype: np.dtype
) -> tuple[np.ndarray, np.ndarray]:
    """
    Convert a primitive pyarrow.Array to a numpy array and boolean mask based
    on the buffers of the Array.

    At the moment pyarrow.BooleanArray is not supported.

    Parameters
    ----------
    arr : pyarrow.Array
    dtype : numpy.dtype

    Returns
    -------
    (data, mask)
        Tuple of two numpy arrays with the raw data (with specified dtype) and
        a boolean mask (validity mask, so False means missing)
    """
    dtype = np.dtype(dtype)

    if pyarrow.types.is_null(arr.type):
        # No initialization of data is needed since everything is null
        data = np.empty(len(arr), dtype=dtype)
        mask = np.zeros(len(arr), dtype=bool)
        return data, mask
    buflist = arr.buffers()
    # Since Arrow buffers might contain padding and the data might be offset,
    # the buffer gets sliced here before handing it to numpy.
    # See also https://github.com/pandas-dev/pandas/issues/40896
    offset = arr.offset * dtype.itemsize
    length = len(arr) * dtype.itemsize
    data_buf = buflist[1][offset : offset + length]
    data = np.frombuffer(data_buf, dtype=dtype)
    bitmask = buflist[0]
    if bitmask is not None:
        mask = pyarrow.BooleanArray.from_buffers(
            pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset
        )
        mask = np.asarray(mask)
    else:
        mask = np.ones(len(arr), dtype=bool)
    return data, mask
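A short usage sketch (not part of the vendored file) for the helper above, assuming pyarrow is installed; note that `mask` is a validity mask, so the value under a `False` slot is uninitialized and must be ignored:

# Sketch: converting a primitive pyarrow array to (data, validity mask).
import numpy as np
import pyarrow as pa

from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask

arr = pa.array([1, 2, None, 4], type=pa.int64())
data, mask = pyarrow_array_to_numpy_and_mask(arr, np.dtype("int64"))

print(mask)        # [ True  True False  True]
print(data[mask])  # [1 2 4] -- the slot under False is garbage bytes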
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py
ADDED
@@ -0,0 +1,473 @@
"""Accessors for arrow-backed data."""

from __future__ import annotations

from abc import (
    ABCMeta,
    abstractmethod,
)
from typing import (
    TYPE_CHECKING,
    cast,
)

from pandas.compat import (
    pa_version_under10p1,
    pa_version_under11p0,
)

from pandas.core.dtypes.common import is_list_like

if not pa_version_under10p1:
    import pyarrow as pa
    import pyarrow.compute as pc

    from pandas.core.dtypes.dtypes import ArrowDtype

if TYPE_CHECKING:
    from collections.abc import Iterator

    from pandas import (
        DataFrame,
        Series,
    )


class ArrowAccessor(metaclass=ABCMeta):
    @abstractmethod
    def __init__(self, data, validation_msg: str) -> None:
        self._data = data
        self._validation_msg = validation_msg
        self._validate(data)

    @abstractmethod
    def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
        pass

    def _validate(self, data):
        dtype = data.dtype
        if not isinstance(dtype, ArrowDtype):
            # Raise AttributeError so that inspect can handle non-struct Series.
            raise AttributeError(self._validation_msg.format(dtype=dtype))

        if not self._is_valid_pyarrow_dtype(dtype.pyarrow_dtype):
            # Raise AttributeError so that inspect can handle invalid Series.
            raise AttributeError(self._validation_msg.format(dtype=dtype))

    @property
    def _pa_array(self):
        return self._data.array._pa_array


class ListAccessor(ArrowAccessor):
    """
    Accessor object for list data properties of the Series values.

    Parameters
    ----------
    data : Series
        Series containing Arrow list data.
    """

    def __init__(self, data=None) -> None:
        super().__init__(
            data,
            validation_msg="Can only use the '.list' accessor with "
            "'list[pyarrow]' dtype, not {dtype}.",
        )

    def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
        return (
            pa.types.is_list(pyarrow_dtype)
            or pa.types.is_fixed_size_list(pyarrow_dtype)
            or pa.types.is_large_list(pyarrow_dtype)
        )

    def len(self) -> Series:
        """
        Return the length of each list in the Series.

        Returns
        -------
        pandas.Series
            The length of each list.

        Examples
        --------
        >>> import pyarrow as pa
        >>> s = pd.Series(
        ...     [
        ...         [1, 2, 3],
        ...         [3],
        ...     ],
        ...     dtype=pd.ArrowDtype(pa.list_(
        ...         pa.int64()
        ...     ))
        ... )
        >>> s.list.len()
        0    3
        1    1
        dtype: int32[pyarrow]
        """
        from pandas import Series

        value_lengths = pc.list_value_length(self._pa_array)
        return Series(value_lengths, dtype=ArrowDtype(value_lengths.type))

    def __getitem__(self, key: int | slice) -> Series:
        """
        Index or slice lists in the Series.

        Parameters
        ----------
        key : int | slice
            Index or slice of indices to access from each list.

        Returns
        -------
        pandas.Series
            The list at requested index.

        Examples
        --------
        >>> import pyarrow as pa
        >>> s = pd.Series(
        ...     [
        ...         [1, 2, 3],
        ...         [3],
        ...     ],
        ...     dtype=pd.ArrowDtype(pa.list_(
        ...         pa.int64()
        ...     ))
        ... )
        >>> s.list[0]
        0    1
        1    3
        dtype: int64[pyarrow]
        """
        from pandas import Series

        if isinstance(key, int):
            # TODO: Support negative key but pyarrow does not allow
            # element index to be an array.
            # if key < 0:
            #     key = pc.add(key, pc.list_value_length(self._pa_array))
            element = pc.list_element(self._pa_array, key)
            return Series(element, dtype=ArrowDtype(element.type))
        elif isinstance(key, slice):
            if pa_version_under11p0:
                raise NotImplementedError(
                    f"List slice not supported by pyarrow {pa.__version__}."
                )

            # TODO: Support negative start/stop/step, ideally this would be added
            # upstream in pyarrow.
            start, stop, step = key.start, key.stop, key.step
            if start is None:
                # TODO: When adding negative step support
                #  this should be set to the last element of the array
                #  when step is negative.
                start = 0
            if step is None:
                step = 1
            sliced = pc.list_slice(self._pa_array, start, stop, step)
            return Series(sliced, dtype=ArrowDtype(sliced.type))
        else:
            raise ValueError(f"key must be an int or slice, got {type(key).__name__}")

    def __iter__(self) -> Iterator:
        raise TypeError(f"'{type(self).__name__}' object is not iterable")

    def flatten(self) -> Series:
        """
        Flatten list values.

        Returns
        -------
        pandas.Series
            The data from all lists in the series flattened.

        Examples
        --------
        >>> import pyarrow as pa
        >>> s = pd.Series(
        ...     [
        ...         [1, 2, 3],
        ...         [3],
        ...     ],
        ...     dtype=pd.ArrowDtype(pa.list_(
        ...         pa.int64()
        ...     ))
        ... )
        >>> s.list.flatten()
        0    1
        1    2
        2    3
        3    3
        dtype: int64[pyarrow]
        """
        from pandas import Series

        flattened = pc.list_flatten(self._pa_array)
        return Series(flattened, dtype=ArrowDtype(flattened.type))


class StructAccessor(ArrowAccessor):
    """
    Accessor object for structured data properties of the Series values.

    Parameters
    ----------
    data : Series
        Series containing Arrow struct data.
    """

    def __init__(self, data=None) -> None:
        super().__init__(
            data,
            validation_msg=(
                "Can only use the '.struct' accessor with 'struct[pyarrow]' "
                "dtype, not {dtype}."
            ),
        )

    def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
        return pa.types.is_struct(pyarrow_dtype)

    @property
    def dtypes(self) -> Series:
        """
        Return the dtype object of each child field of the struct.

        Returns
        -------
        pandas.Series
            The data type of each child field.

        Examples
        --------
        >>> import pyarrow as pa
        >>> s = pd.Series(
        ...     [
        ...         {"version": 1, "project": "pandas"},
        ...         {"version": 2, "project": "pandas"},
        ...         {"version": 1, "project": "numpy"},
        ...     ],
        ...     dtype=pd.ArrowDtype(pa.struct(
        ...         [("version", pa.int64()), ("project", pa.string())]
        ...     ))
        ... )
        >>> s.struct.dtypes
        version     int64[pyarrow]
        project    string[pyarrow]
        dtype: object
        """
        from pandas import (
            Index,
            Series,
        )

        pa_type = self._data.dtype.pyarrow_dtype
        types = [ArrowDtype(struct.type) for struct in pa_type]
        names = [struct.name for struct in pa_type]
        return Series(types, index=Index(names))

    def field(
        self,
        name_or_index: list[str]
        | list[bytes]
        | list[int]
        | pc.Expression
        | bytes
        | str
        | int,
    ) -> Series:
        """
        Extract a child field of a struct as a Series.

        Parameters
        ----------
        name_or_index : str | bytes | int | expression | list
            Name or index of the child field to extract.

            For list-like inputs, this will index into a nested
            struct.

        Returns
        -------
        pandas.Series
            The data corresponding to the selected child field.

        See Also
        --------
        Series.struct.explode : Return all child fields as a DataFrame.

        Notes
        -----
        The name of the resulting Series will be set using the following
        rules:

        - For string, bytes, or integer `name_or_index` (or a list of these, for
          a nested selection), the Series name is set to the selected
          field's name.
        - For a :class:`pyarrow.compute.Expression`, this is set to
          the string form of the expression.
        - For list-like `name_or_index`, the name will be set to the
          name of the final field selected.

        Examples
        --------
        >>> import pyarrow as pa
        >>> s = pd.Series(
        ...     [
        ...         {"version": 1, "project": "pandas"},
        ...         {"version": 2, "project": "pandas"},
        ...         {"version": 1, "project": "numpy"},
        ...     ],
        ...     dtype=pd.ArrowDtype(pa.struct(
        ...         [("version", pa.int64()), ("project", pa.string())]
        ...     ))
        ... )

        Extract by field name.

        >>> s.struct.field("project")
        0    pandas
        1    pandas
        2     numpy
        Name: project, dtype: string[pyarrow]

        Extract by field index.

        >>> s.struct.field(0)
        0    1
        1    2
        2    1
        Name: version, dtype: int64[pyarrow]

        Or an expression

        >>> import pyarrow.compute as pc
        >>> s.struct.field(pc.field("project"))
        0    pandas
        1    pandas
        2     numpy
        Name: project, dtype: string[pyarrow]

        For nested struct types, you can pass a list of values to index
        multiple levels:

        >>> version_type = pa.struct([
        ...     ("major", pa.int64()),
        ...     ("minor", pa.int64()),
        ... ])
        >>> s = pd.Series(
        ...     [
        ...         {"version": {"major": 1, "minor": 5}, "project": "pandas"},
        ...         {"version": {"major": 2, "minor": 1}, "project": "pandas"},
        ...         {"version": {"major": 1, "minor": 26}, "project": "numpy"},
        ...     ],
        ...     dtype=pd.ArrowDtype(pa.struct(
        ...         [("version", version_type), ("project", pa.string())]
        ...     ))
        ... )
        >>> s.struct.field(["version", "minor"])
        0     5
        1     1
        2    26
        Name: minor, dtype: int64[pyarrow]
        >>> s.struct.field([0, 0])
        0    1
        1    2
        2    1
        Name: major, dtype: int64[pyarrow]
        """
        from pandas import Series

        def get_name(
            level_name_or_index: list[str]
            | list[bytes]
            | list[int]
            | pc.Expression
            | bytes
            | str
            | int,
            data: pa.ChunkedArray,
        ):
            if isinstance(level_name_or_index, int):
                name = data.type.field(level_name_or_index).name
            elif isinstance(level_name_or_index, (str, bytes)):
                name = level_name_or_index
            elif isinstance(level_name_or_index, pc.Expression):
                name = str(level_name_or_index)
            elif is_list_like(level_name_or_index):
                # For nested input like [2, 1, 2]
                # iteratively get the struct and field name. The last
                # one is used for the name of the index.
                level_name_or_index = list(reversed(level_name_or_index))
                selected = data
                while level_name_or_index:
                    # we need the cast, otherwise mypy complains about
                    # getting ints, bytes, or str here, which isn't possible.
                    level_name_or_index = cast(list, level_name_or_index)
                    name_or_index = level_name_or_index.pop()
                    name = get_name(name_or_index, selected)
                    selected = selected.type.field(selected.type.get_field_index(name))
                    name = selected.name
            else:
                raise ValueError(
                    "name_or_index must be an int, str, bytes, "
                    "pyarrow.compute.Expression, or list of those"
                )
            return name

        pa_arr = self._data.array._pa_array
        name = get_name(name_or_index, pa_arr)
        field_arr = pc.struct_field(pa_arr, name_or_index)

        return Series(
            field_arr,
            dtype=ArrowDtype(field_arr.type),
            index=self._data.index,
            name=name,
        )

    def explode(self) -> DataFrame:
        """
        Extract all child fields of a struct as a DataFrame.

        Returns
        -------
        pandas.DataFrame
            The data corresponding to all child fields.

        See Also
        --------
        Series.struct.field : Return a single child field as a Series.

        Examples
        --------
        >>> import pyarrow as pa
        >>> s = pd.Series(
        ...     [
        ...         {"version": 1, "project": "pandas"},
        ...         {"version": 2, "project": "pandas"},
        ...         {"version": 1, "project": "numpy"},
        ...     ],
        ...     dtype=pd.ArrowDtype(pa.struct(
        ...         [("version", pa.int64()), ("project", pa.string())]
        ...     ))
        ... )

        >>> s.struct.explode()
           version project
        0        1  pandas
        1        2  pandas
        2        1   numpy
        """
        from pandas import concat

        pa_type = self._pa_array.type
        return concat(
            [self.field(i) for i in range(pa_type.num_fields)], axis="columns"
        )
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py
ADDED
The diff for this file is too large to render.
See raw diff
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py
ADDED
@@ -0,0 +1,174 @@
from __future__ import annotations

import json
from typing import TYPE_CHECKING

import pyarrow

from pandas.compat import pa_version_under14p1

from pandas.core.dtypes.dtypes import (
    IntervalDtype,
    PeriodDtype,
)

from pandas.core.arrays.interval import VALID_CLOSED

if TYPE_CHECKING:
    from pandas._typing import IntervalClosedType


class ArrowPeriodType(pyarrow.ExtensionType):
    def __init__(self, freq) -> None:
        # attributes need to be set first before calling
        # super init (as that calls serialize)
        self._freq = freq
        pyarrow.ExtensionType.__init__(self, pyarrow.int64(), "pandas.period")

    @property
    def freq(self):
        return self._freq

    def __arrow_ext_serialize__(self) -> bytes:
        metadata = {"freq": self.freq}
        return json.dumps(metadata).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowPeriodType:
        metadata = json.loads(serialized.decode())
        return ArrowPeriodType(metadata["freq"])

    def __eq__(self, other):
        if isinstance(other, pyarrow.BaseExtensionType):
            return type(self) == type(other) and self.freq == other.freq
        else:
            return NotImplemented

    def __ne__(self, other) -> bool:
        return not self == other

    def __hash__(self) -> int:
        return hash((str(self), self.freq))

    def to_pandas_dtype(self) -> PeriodDtype:
        return PeriodDtype(freq=self.freq)


# register the type with a dummy instance
_period_type = ArrowPeriodType("D")
pyarrow.register_extension_type(_period_type)


class ArrowIntervalType(pyarrow.ExtensionType):
    def __init__(self, subtype, closed: IntervalClosedType) -> None:
        # attributes need to be set first before calling
        # super init (as that calls serialize)
        assert closed in VALID_CLOSED
        self._closed: IntervalClosedType = closed
        if not isinstance(subtype, pyarrow.DataType):
            subtype = pyarrow.type_for_alias(str(subtype))
        self._subtype = subtype

        storage_type = pyarrow.struct([("left", subtype), ("right", subtype)])
        pyarrow.ExtensionType.__init__(self, storage_type, "pandas.interval")

    @property
    def subtype(self):
        return self._subtype

    @property
    def closed(self) -> IntervalClosedType:
        return self._closed

    def __arrow_ext_serialize__(self) -> bytes:
        metadata = {"subtype": str(self.subtype), "closed": self.closed}
        return json.dumps(metadata).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType:
        metadata = json.loads(serialized.decode())
        subtype = pyarrow.type_for_alias(metadata["subtype"])
        closed = metadata["closed"]
        return ArrowIntervalType(subtype, closed)

    def __eq__(self, other):
        if isinstance(other, pyarrow.BaseExtensionType):
            return (
                type(self) == type(other)
                and self.subtype == other.subtype
                and self.closed == other.closed
            )
        else:
            return NotImplemented

    def __ne__(self, other) -> bool:
        return not self == other

    def __hash__(self) -> int:
        return hash((str(self), str(self.subtype), self.closed))

    def to_pandas_dtype(self) -> IntervalDtype:
        return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed)


# register the type with a dummy instance
_interval_type = ArrowIntervalType(pyarrow.int64(), "left")
pyarrow.register_extension_type(_interval_type)


_ERROR_MSG = """\
Disallowed deserialization of 'arrow.py_extension_type':
storage_type = {storage_type}
serialized = {serialized}
pickle disassembly:\n{pickle_disassembly}

Reading of untrusted Parquet or Feather files with a PyExtensionType column
allows arbitrary code execution.
If you trust this file, you can enable reading the extension type by one of:

- upgrading to pyarrow >= 14.0.1, and call `pa.PyExtensionType.set_auto_load(True)`
- install pyarrow-hotfix (`pip install pyarrow-hotfix`) and disable it by running
  `import pyarrow_hotfix; pyarrow_hotfix.uninstall()`

We strongly recommend updating your Parquet/Feather files to use extension types
derived from `pyarrow.ExtensionType` instead, and register this type explicitly.
"""


def patch_pyarrow():
    # starting from pyarrow 14.0.1, it has its own mechanism
    if not pa_version_under14p1:
        return

    # if https://github.com/pitrou/pyarrow-hotfix was installed and enabled
    if getattr(pyarrow, "_hotfix_installed", False):
        return

    class ForbiddenExtensionType(pyarrow.ExtensionType):
        def __arrow_ext_serialize__(self):
            return b""

        @classmethod
        def __arrow_ext_deserialize__(cls, storage_type, serialized):
            import io
            import pickletools

            out = io.StringIO()
            pickletools.dis(serialized, out)
            raise RuntimeError(
                _ERROR_MSG.format(
                    storage_type=storage_type,
                    serialized=serialized,
                    pickle_disassembly=out.getvalue(),
                )
            )

    pyarrow.unregister_extension_type("arrow.py_extension_type")
    pyarrow.register_extension_type(
        ForbiddenExtensionType(pyarrow.null(), "arrow.py_extension_type")
    )

    pyarrow._hotfix_installed = True


patch_pyarrow()
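The serialize/deserialize hooks above carry the dtype parameters as JSON metadata so the Arrow type survives a Parquet/Feather roundtrip. A small roundtrip sketch (not part of the vendored file) exercising them directly:

# Sketch: roundtripping an ArrowIntervalType through its ext hooks.
import pyarrow as pa

from pandas.core.arrays.arrow.extension_types import ArrowIntervalType

t = ArrowIntervalType(pa.int64(), "left")
payload = t.__arrow_ext_serialize__()  # b'{"subtype": "int64", "closed": "left"}'
restored = ArrowIntervalType.__arrow_ext_deserialize__(t.storage_type, payload)

assert restored == t
print(restored.to_pandas_dtype())  # interval[int64, left]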
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (487 Bytes).
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc
ADDED
Binary file (13.2 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc
ADDED
Binary file (44.5 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc
ADDED
Binary file (6.44 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py
ADDED
@@ -0,0 +1,207 @@
"""
Interaction with scipy.sparse matrices.

Currently only includes to_coo helpers.
"""
from __future__ import annotations

from typing import TYPE_CHECKING

from pandas._libs import lib

from pandas.core.dtypes.missing import notna

from pandas.core.algorithms import factorize
from pandas.core.indexes.api import MultiIndex
from pandas.core.series import Series

if TYPE_CHECKING:
    from collections.abc import Iterable

    import numpy as np
    import scipy.sparse

    from pandas._typing import (
        IndexLabel,
        npt,
    )


def _check_is_partition(parts: Iterable, whole: Iterable):
    whole = set(whole)
    parts = [set(x) for x in parts]
    if set.intersection(*parts) != set():
        raise ValueError("Is not a partition because intersection is not null.")
    if set.union(*parts) != whole:
        raise ValueError("Is not a partition because union is not the whole.")


def _levels_to_axis(
    ss,
    levels: tuple[int] | list[int],
    valid_ilocs: npt.NDArray[np.intp],
    sort_labels: bool = False,
) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]:
    """
    For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`,
    where `ax_coords` are the coordinates along one of the two axes of the
    destination sparse matrix, and `ax_labels` are the labels from `ss`' Index
    which correspond to these coordinates.

    Parameters
    ----------
    ss : Series
    levels : tuple/list
    valid_ilocs : numpy.ndarray
        Array of integer positions of valid values for the sparse matrix in ss.
    sort_labels : bool, default False
        Sort the axis labels before forming the sparse matrix. When `levels`
        refers to a single level, set to True for a faster execution.

    Returns
    -------
    ax_coords : numpy.ndarray (axis coordinates)
    ax_labels : list (axis labels)
    """
    # Since the labels are sorted in `Index.levels`, when we wish to sort and
    # there is only one level of the MultiIndex for this axis, the desired
    # output can be obtained in the following simpler, more efficient way.
    if sort_labels and len(levels) == 1:
        ax_coords = ss.index.codes[levels[0]][valid_ilocs]
        ax_labels = ss.index.levels[levels[0]]

    else:
        levels_values = lib.fast_zip(
            [ss.index.get_level_values(lvl).to_numpy() for lvl in levels]
        )
        codes, ax_labels = factorize(levels_values, sort=sort_labels)
        ax_coords = codes[valid_ilocs]

    ax_labels = ax_labels.tolist()
    return ax_coords, ax_labels


def _to_ijv(
    ss,
    row_levels: tuple[int] | list[int] = (0,),
    column_levels: tuple[int] | list[int] = (1,),
    sort_labels: bool = False,
) -> tuple[
    np.ndarray,
    npt.NDArray[np.intp],
    npt.NDArray[np.intp],
    list[IndexLabel],
    list[IndexLabel],
]:
    """
    For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels,
    jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo
    constructor, and ilabels and jlabels are the row and column labels
    respectively.

    Parameters
    ----------
    ss : Series
    row_levels : tuple/list
    column_levels : tuple/list
    sort_labels : bool, default False
        Sort the row and column labels before forming the sparse matrix.
        When `row_levels` and/or `column_levels` refer to a single level,
        set to `True` for a faster execution.

    Returns
    -------
    values : numpy.ndarray
        Valid values to populate a sparse matrix, extracted from
        ss.
    i_coords : numpy.ndarray (row coordinates of the values)
    j_coords : numpy.ndarray (column coordinates of the values)
    i_labels : list (row labels)
    j_labels : list (column labels)
    """
    # index and column levels must be a partition of the index
    _check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
    # From the sparse Series, get the integer indices and data for valid sparse
    # entries.
    sp_vals = ss.array.sp_values
    na_mask = notna(sp_vals)
    values = sp_vals[na_mask]
    valid_ilocs = ss.array.sp_index.indices[na_mask]

    i_coords, i_labels = _levels_to_axis(
        ss, row_levels, valid_ilocs, sort_labels=sort_labels
    )

    j_coords, j_labels = _levels_to_axis(
        ss, column_levels, valid_ilocs, sort_labels=sort_labels
    )

    return values, i_coords, j_coords, i_labels, j_labels


def sparse_series_to_coo(
    ss: Series,
    row_levels: Iterable[int] = (0,),
    column_levels: Iterable[int] = (1,),
    sort_labels: bool = False,
) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]:
    """
    Convert a sparse Series to a scipy.sparse.coo_matrix using index
    levels row_levels, column_levels as the row and column
    labels respectively. Returns the sparse_matrix, row and column labels.
    """
    import scipy.sparse

    if ss.index.nlevels < 2:
        raise ValueError("to_coo requires MultiIndex with nlevels >= 2.")
    if not ss.index.is_unique:
        raise ValueError(
            "Duplicate index entries are not allowed in to_coo transformation."
        )

    # to keep things simple, only rely on integer indexing (not labels)
    row_levels = [ss.index._get_level_number(x) for x in row_levels]
    column_levels = [ss.index._get_level_number(x) for x in column_levels]

    v, i, j, rows, columns = _to_ijv(
        ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels
    )
    sparse_matrix = scipy.sparse.coo_matrix(
        (v, (i, j)), shape=(len(rows), len(columns))
    )
    return sparse_matrix, rows, columns


def coo_to_sparse_series(
    A: scipy.sparse.coo_matrix, dense_index: bool = False
) -> Series:
    """
    Convert a scipy.sparse.coo_matrix to a Series with type sparse.

    Parameters
    ----------
    A : scipy.sparse.coo_matrix
    dense_index : bool, default False

    Returns
    -------
    Series

    Raises
    ------
    TypeError if A is not a coo_matrix
    """
    from pandas import SparseDtype

    try:
        ser = Series(A.data, MultiIndex.from_arrays((A.row, A.col)), copy=False)
    except AttributeError as err:
        raise TypeError(
            f"Expected coo_matrix. Got {type(A).__name__} instead."
        ) from err
    ser = ser.sort_index()
    ser = ser.astype(SparseDtype(ser.dtype))
    if dense_index:
        ind = MultiIndex.from_product([A.row, A.col])
        ser = ser.reindex(ind)
    return ser
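These helpers back the public `Series.sparse.to_coo` accessor: one set of MultiIndex levels becomes the row coordinates, the other the column coordinates. A usage sketch (not part of the vendored file):

# Sketch: Series.sparse.to_coo driven by the helpers above.
import numpy as np
import pandas as pd

s = pd.Series(
    [3.0, np.nan, 1.0, 3.0],
    index=pd.MultiIndex.from_tuples(
        [("a", 0), ("a", 1), ("b", 0), ("b", 1)], names=["row", "col"]
    ),
    dtype="Sparse[float64]",
)
A, rows, cols = s.sparse.to_coo(
    row_levels=["row"], column_levels=["col"], sort_labels=True
)
print(A.todense())  # [[3. 0.] [1. 3.]] -- the NaN entry is simply absent
print(rows, cols)   # ['a', 'b'] [0, 1]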
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (191 Bytes).
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc
ADDED
Binary file (1.29 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc
ADDED
Binary file (6.72 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc
ADDED
Binary file (62.6 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/api.py
ADDED
@@ -0,0 +1,85 @@
from pandas.core.dtypes.common import (
    is_any_real_numeric_dtype,
    is_array_like,
    is_bool,
    is_bool_dtype,
    is_categorical_dtype,
    is_complex,
    is_complex_dtype,
    is_datetime64_any_dtype,
    is_datetime64_dtype,
    is_datetime64_ns_dtype,
    is_datetime64tz_dtype,
    is_dict_like,
    is_dtype_equal,
    is_extension_array_dtype,
    is_file_like,
    is_float,
    is_float_dtype,
    is_hashable,
    is_int64_dtype,
    is_integer,
    is_integer_dtype,
    is_interval,
    is_interval_dtype,
    is_iterator,
    is_list_like,
    is_named_tuple,
    is_number,
    is_numeric_dtype,
    is_object_dtype,
    is_period_dtype,
    is_re,
    is_re_compilable,
    is_scalar,
    is_signed_integer_dtype,
    is_sparse,
    is_string_dtype,
    is_timedelta64_dtype,
    is_timedelta64_ns_dtype,
    is_unsigned_integer_dtype,
    pandas_dtype,
)

__all__ = [
    "is_any_real_numeric_dtype",
    "is_array_like",
    "is_bool",
    "is_bool_dtype",
    "is_categorical_dtype",
    "is_complex",
    "is_complex_dtype",
    "is_datetime64_any_dtype",
    "is_datetime64_dtype",
    "is_datetime64_ns_dtype",
    "is_datetime64tz_dtype",
    "is_dict_like",
    "is_dtype_equal",
    "is_extension_array_dtype",
    "is_file_like",
    "is_float",
    "is_float_dtype",
    "is_hashable",
    "is_int64_dtype",
    "is_integer",
    "is_integer_dtype",
    "is_interval",
    "is_interval_dtype",
    "is_iterator",
    "is_list_like",
    "is_named_tuple",
    "is_number",
    "is_numeric_dtype",
    "is_object_dtype",
    "is_period_dtype",
    "is_re",
    "is_re_compilable",
    "is_scalar",
    "is_signed_integer_dtype",
    "is_sparse",
    "is_string_dtype",
    "is_timedelta64_dtype",
    "is_timedelta64_ns_dtype",
    "is_unsigned_integer_dtype",
    "pandas_dtype",
]
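These introspection helpers are re-exported publicly under `pandas.api.types`. A brief sketch (not part of the vendored file):

# Sketch: the same predicates via the public pandas.api.types namespace.
import numpy as np
import pandas as pd

print(pd.api.types.is_integer_dtype(np.dtype("int32")))     # True
print(pd.api.types.is_string_dtype(pd.Series(["a", "b"])))  # True
print(pd.api.types.pandas_dtype("datetime64[ns]"))          # datetime64[ns]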
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/astype.py
ADDED
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Functions for implementing 'astype' methods according to pandas conventions,
|
3 |
+
particularly ones that differ from numpy.
|
4 |
+
"""
|
5 |
+
from __future__ import annotations
|
6 |
+
|
7 |
+
import inspect
|
8 |
+
from typing import (
|
9 |
+
TYPE_CHECKING,
|
10 |
+
overload,
|
11 |
+
)
|
12 |
+
import warnings
|
13 |
+
|
14 |
+
import numpy as np
|
15 |
+
|
16 |
+
from pandas._libs import lib
|
17 |
+
from pandas._libs.tslibs.timedeltas import array_to_timedelta64
|
18 |
+
from pandas.errors import IntCastingNaNError
|
19 |
+
|
20 |
+
from pandas.core.dtypes.common import (
|
21 |
+
is_object_dtype,
|
22 |
+
is_string_dtype,
|
23 |
+
pandas_dtype,
|
24 |
+
)
|
25 |
+
from pandas.core.dtypes.dtypes import (
|
26 |
+
ExtensionDtype,
|
27 |
+
NumpyEADtype,
|
28 |
+
)
|
29 |
+
|
30 |
+
if TYPE_CHECKING:
|
31 |
+
from pandas._typing import (
|
32 |
+
ArrayLike,
|
33 |
+
DtypeObj,
|
34 |
+
IgnoreRaise,
|
35 |
+
)
|
36 |
+
|
37 |
+
from pandas.core.arrays import ExtensionArray
|
38 |
+
|
39 |
+
_dtype_obj = np.dtype(object)
|
40 |
+
|
41 |
+
|
42 |
+
@overload
|
43 |
+
def _astype_nansafe(
|
44 |
+
arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...
|
45 |
+
) -> np.ndarray:
|
46 |
+
...
|
47 |
+
|
48 |
+
|
49 |
+
@overload
|
50 |
+
def _astype_nansafe(
|
51 |
+
arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...
|
52 |
+
) -> ExtensionArray:
|
53 |
+
...
|
54 |
+
|
55 |
+
|
56 |
+
def _astype_nansafe(
|
57 |
+
arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False
|
58 |
+
) -> ArrayLike:
|
59 |
+
"""
|
60 |
+
Cast the elements of an array to a given dtype a nan-safe manner.
|
61 |
+
|
62 |
+
Parameters
|
63 |
+
----------
|
64 |
+
arr : ndarray
|
65 |
+
dtype : np.dtype or ExtensionDtype
|
66 |
+
copy : bool, default True
|
67 |
+
If False, a view will be attempted but may fail, if
|
68 |
+
e.g. the item sizes don't align.
|
69 |
+
skipna: bool, default False
|
70 |
+
Whether or not we should skip NaN when casting as a string-type.
|
71 |
+
|
72 |
+
Raises
|
73 |
+
------
|
74 |
+
ValueError
|
75 |
+
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
|
76 |
+
"""
|
77 |
+
|
78 |
+
# dispatch on extension dtype if needed
|
79 |
+
if isinstance(dtype, ExtensionDtype):
|
80 |
+
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
|
81 |
+
|
82 |
+
elif not isinstance(dtype, np.dtype): # pragma: no cover
|
83 |
+
raise ValueError("dtype must be np.dtype or ExtensionDtype")
|
84 |
+
|
85 |
+
if arr.dtype.kind in "mM":
|
86 |
+
from pandas.core.construction import ensure_wrapped_if_datetimelike
|
87 |
+
|
88 |
+
arr = ensure_wrapped_if_datetimelike(arr)
|
89 |
+
res = arr.astype(dtype, copy=copy)
|
90 |
+
return np.asarray(res)
|
91 |
+
|
92 |
+
if issubclass(dtype.type, str):
|
93 |
+
shape = arr.shape
|
94 |
+
if arr.ndim > 1:
|
95 |
+
arr = arr.ravel()
|
96 |
+
return lib.ensure_string_array(
|
97 |
+
arr, skipna=skipna, convert_na_value=False
|
98 |
+
).reshape(shape)
|
99 |
+
|
100 |
+
elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu":
|
101 |
+
return _astype_float_to_int_nansafe(arr, dtype, copy)
|
102 |
+
|
103 |
+
elif arr.dtype == object:
|
104 |
+
# if we have a datetime/timedelta array of objects
|
105 |
+
# then coerce to datetime64[ns] and use DatetimeArray.astype
|
106 |
+
|
107 |
+
if lib.is_np_dtype(dtype, "M"):
|
108 |
+
from pandas.core.arrays import DatetimeArray
|
109 |
+
|
110 |
+
dta = DatetimeArray._from_sequence(arr, dtype=dtype)
|
111 |
+
return dta._ndarray
|
112 |
+
|
113 |
+
elif lib.is_np_dtype(dtype, "m"):
|
114 |
+
from pandas.core.construction import ensure_wrapped_if_datetimelike
|
115 |
+
|
116 |
+
# bc we know arr.dtype == object, this is equivalent to
|
117 |
+
# `np.asarray(to_timedelta(arr))`, but using a lower-level API that
|
118 |
+
# does not require a circular import.
|
119 |
+
tdvals = array_to_timedelta64(arr).view("m8[ns]")
|
120 |
+
|
121 |
+
tda = ensure_wrapped_if_datetimelike(tdvals)
|
122 |
+
return tda.astype(dtype, copy=False)._ndarray
|
123 |
+
|
124 |
+
if dtype.name in ("datetime64", "timedelta64"):
|
125 |
+
msg = (
|
126 |
+
f"The '{dtype.name}' dtype has no unit. Please pass in "
|
127 |
+
f"'{dtype.name}[ns]' instead."
|
128 |
+
)
|
129 |
+
raise ValueError(msg)
|
130 |
+
|
131 |
+
if copy or arr.dtype == object or dtype == object:
|
132 |
+
# Explicit copy, or required since NumPy can't view from / to object.
|
133 |
+
return arr.astype(dtype, copy=True)
|
134 |
+
|
135 |
+
return arr.astype(dtype, copy=copy)
|
136 |
+
|
137 |
+
|
138 |
+
def _astype_float_to_int_nansafe(
|
139 |
+
values: np.ndarray, dtype: np.dtype, copy: bool
|
140 |
+
) -> np.ndarray:
|
141 |
+
"""
|
142 |
+
astype with a check preventing converting NaN to an meaningless integer value.
|
143 |
+
"""
|
144 |
+
if not np.isfinite(values).all():
|
145 |
+
raise IntCastingNaNError(
|
146 |
+
"Cannot convert non-finite values (NA or inf) to integer"
|
147 |
+
)
|
148 |
+
if dtype.kind == "u":
|
149 |
+
# GH#45151
|
150 |
+
if not (values >= 0).all():
|
151 |
+
raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}")
|
152 |
+
with warnings.catch_warnings():
|
153 |
+
warnings.filterwarnings("ignore", category=RuntimeWarning)
|
154 |
+
return values.astype(dtype, copy=copy)
|
155 |
+
|
156 |
+
|
157 |
+
def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike:
    """
    Cast array (ndarray or ExtensionArray) to the new dtype.

    Parameters
    ----------
    values : ndarray or ExtensionArray
    dtype : dtype object
    copy : bool, default False
        copy if indicated

    Returns
    -------
    ndarray or ExtensionArray
    """
    if values.dtype == dtype:
        if copy:
            return values.copy()
        return values

    if not isinstance(values, np.ndarray):
        # i.e. ExtensionArray
        values = values.astype(dtype, copy=copy)

    else:
        values = _astype_nansafe(values, dtype, copy=copy)

    # in pandas we don't store numpy str dtypes, so convert to object
    if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str):
        values = np.array(values, dtype=object)

    return values

def astype_array_safe(
    values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise"
) -> ArrayLike:
    """
    Cast array (ndarray or ExtensionArray) to the new dtype.

    This basically is the implementation for DataFrame/Series.astype and
    includes all custom logic for pandas (NaN-safety, converting str to object,
    not allowing )

    Parameters
    ----------
    values : ndarray or ExtensionArray
    dtype : str, dtype convertible
    copy : bool, default False
        copy if indicated
    errors : str, {'raise', 'ignore'}, default 'raise'
        - ``raise`` : allow exceptions to be raised
        - ``ignore`` : suppress exceptions. On error return original object

    Returns
    -------
    ndarray or ExtensionArray
    """
    errors_legal_values = ("raise", "ignore")

    if errors not in errors_legal_values:
        invalid_arg = (
            "Expected value of kwarg 'errors' to be one of "
            f"{list(errors_legal_values)}. Supplied value is '{errors}'"
        )
        raise ValueError(invalid_arg)

    if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):
        msg = (
            f"Expected an instance of {dtype.__name__}, "
            "but got the class instead. Try instantiating 'dtype'."
        )
        raise TypeError(msg)

    dtype = pandas_dtype(dtype)
    if isinstance(dtype, NumpyEADtype):
        # Ensure we don't end up with a NumpyExtensionArray
        dtype = dtype.numpy_dtype

    try:
        new_values = astype_array(values, dtype, copy=copy)
    except (ValueError, TypeError):
        # e.g. _astype_nansafe can fail on object-dtype of strings
        # trying to convert to float
        if errors == "ignore":
            new_values = values
        else:
            raise

    return new_values

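# --- illustrative note (not part of the upstream file) ----------------------
# A sketch of the errors="ignore" path above (internal entry point): the
# failed str->float cast is swallowed and the original array is returned.
#
# >>> import numpy as np
# >>> from pandas.core.dtypes.astype import astype_array_safe
# >>> vals = np.array(["a", "b"], dtype=object)
# >>> astype_array_safe(vals, "float64", errors="ignore")
# array(['a', 'b'], dtype=object)
# -----------------------------------------------------------------------------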
def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool:
    """Checks if astype avoided copying the data.

    Parameters
    ----------
    dtype : Original dtype
    new_dtype : target dtype

    Returns
    -------
    True if new data is a view or not guaranteed to be a copy, False otherwise
    """
    if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype):
        new_dtype, dtype = dtype, new_dtype

    if dtype == new_dtype:
        return True

    elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype):
        # Only equal numpy dtypes avoid a copy
        return False

    elif is_string_dtype(dtype) and is_string_dtype(new_dtype):
        # Potentially! a view when converting from object to string
        return True

    elif is_object_dtype(dtype) and new_dtype.kind == "O":
        # When the underlying array has dtype object, we don't have to make a copy
        return True

    elif dtype.kind in "mM" and new_dtype.kind in "mM":
        dtype = getattr(dtype, "numpy_dtype", dtype)
        new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype)
        return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None)

    numpy_dtype = getattr(dtype, "numpy_dtype", None)
    new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None)

    if numpy_dtype is None and isinstance(dtype, np.dtype):
        numpy_dtype = dtype

    if new_numpy_dtype is None and isinstance(new_dtype, np.dtype):
        new_numpy_dtype = new_dtype

    if numpy_dtype is not None and new_numpy_dtype is not None:
        # if both have NumPy dtype or one of them is a numpy dtype
        # they are only a view when the numpy dtypes are equal, e.g.
        # int64 -> Int64 or int64[pyarrow]
        # int64 -> Int32 copies
        return numpy_dtype == new_numpy_dtype

    # Assume this is a view since we don't know for sure if a copy was made
    return True
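# --- illustrative note (not part of the upstream file) ----------------------
# A sketch of the view check for plain numpy dtypes (internal API):
#
# >>> import numpy as np
# >>> from pandas.core.dtypes.astype import astype_is_view
# >>> astype_is_view(np.dtype("int64"), np.dtype("int64"))
# True
# >>> astype_is_view(np.dtype("int64"), np.dtype("int32"))
# False
# -----------------------------------------------------------------------------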
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/base.py
ADDED
@@ -0,0 +1,583 @@
"""
Extend pandas with custom array types.
"""
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
    TypeVar,
    cast,
    overload,
)

import numpy as np

from pandas._libs import missing as libmissing
from pandas._libs.hashtable import object_hash
from pandas._libs.properties import cache_readonly
from pandas.errors import AbstractMethodError

from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCIndex,
    ABCSeries,
)

if TYPE_CHECKING:
    from pandas._typing import (
        DtypeObj,
        Self,
        Shape,
        npt,
        type_t,
    )

    from pandas import Index
    from pandas.core.arrays import ExtensionArray

    # To parameterize on same ExtensionDtype
    ExtensionDtypeT = TypeVar("ExtensionDtypeT", bound="ExtensionDtype")


class ExtensionDtype:
    """
    A custom data type, to be paired with an ExtensionArray.

    See Also
    --------
    extensions.register_extension_dtype: Register an ExtensionType
        with pandas as class decorator.
    extensions.ExtensionArray: Abstract base class for custom 1-D array types.

    Notes
    -----
    The interface includes the following abstract methods that must
    be implemented by subclasses:

    * type
    * name
    * construct_array_type

    The following attributes and methods influence the behavior of the dtype in
    pandas operations

    * _is_numeric
    * _is_boolean
    * _get_common_dtype

    The `na_value` class attribute can be used to set the default NA value
    for this type. :attr:`numpy.nan` is used by default.

    ExtensionDtypes are required to be hashable. The base class provides
    a default implementation, which relies on the ``_metadata`` class
    attribute. ``_metadata`` should be a tuple containing the strings
    that define your data type. For example, with ``PeriodDtype`` that's
    the ``freq`` attribute.

    **If you have a parametrized dtype you should set the ``_metadata``
    class property**.

    Ideally, the attributes in ``_metadata`` will match the
    parameters to your ``ExtensionDtype.__init__`` (if any). If any of
    the attributes in ``_metadata`` don't implement the standard
    ``__eq__`` or ``__hash__``, the default implementations here will not
    work.

    Examples
    --------

    For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
    can be implemented: this method receives a pyarrow Array or ChunkedArray
    as only argument and is expected to return the appropriate pandas
    ExtensionArray for this dtype and the passed values:

    >>> import pyarrow
    >>> from pandas.api.extensions import ExtensionArray
    >>> class ExtensionDtype:
    ...     def __from_arrow__(
    ...         self,
    ...         array: pyarrow.Array | pyarrow.ChunkedArray
    ...     ) -> ExtensionArray:
    ...         ...

    This class does not inherit from 'abc.ABCMeta' for performance reasons.
    Methods and properties required by the interface raise
    ``pandas.errors.AbstractMethodError`` and no ``register`` method is
    provided for registering virtual subclasses.
    """

    _metadata: tuple[str, ...] = ()

    def __str__(self) -> str:
        return self.name

    def __eq__(self, other: object) -> bool:
        """
        Check whether 'other' is equal to self.

        By default, 'other' is considered equal if either

        * it's a string matching 'self.name'.
        * it's an instance of this type and all of the attributes
          in ``self._metadata`` are equal between `self` and `other`.

        Parameters
        ----------
        other : Any

        Returns
        -------
        bool
        """
        if isinstance(other, str):
            try:
                other = self.construct_from_string(other)
            except TypeError:
                return False
        if isinstance(other, type(self)):
            return all(
                getattr(self, attr) == getattr(other, attr) for attr in self._metadata
            )
        return False

    def __hash__(self) -> int:
        # for python>=3.10, different nan objects have different hashes
        # we need to avoid that and thus use hash function with old behavior
        return object_hash(tuple(getattr(self, attr) for attr in self._metadata))

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    @property
    def na_value(self) -> object:
        """
        Default NA value to use for this type.

        This is used in e.g. ExtensionArray.take. This should be the
        user-facing "boxed" version of the NA value, not the physical NA value
        for storage. e.g. for JSONArray, this is an empty dictionary.
        """
        return np.nan

    @property
    def type(self) -> type_t[Any]:
        """
        The scalar type for the array, e.g. ``int``

        It's expected ``ExtensionArray[item]`` returns an instance
        of ``ExtensionDtype.type`` for scalar ``item``, assuming
        that value is valid (not NA). NA values do not need to be
        instances of `type`.
        """
        raise AbstractMethodError(self)

    @property
    def kind(self) -> str:
        """
        A character code (one of 'biufcmMOSUV'), default 'O'

        This should match the NumPy dtype used when the array is
        converted to an ndarray, which is probably 'O' for object if
        the extension type cannot be represented as a built-in NumPy
        type.

        See Also
        --------
        numpy.dtype.kind
        """
        return "O"

    @property
    def name(self) -> str:
        """
        A string identifying the data type.

        Will be used for display in, e.g. ``Series.dtype``
        """
        raise AbstractMethodError(self)

    @property
    def names(self) -> list[str] | None:
        """
        Ordered list of field names, or None if there are no fields.

        This is for compatibility with NumPy arrays, and may be removed in the
        future.
        """
        return None

    @classmethod
    def construct_array_type(cls) -> type_t[ExtensionArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        raise AbstractMethodError(cls)

    def empty(self, shape: Shape) -> ExtensionArray:
        """
        Construct an ExtensionArray of this dtype with the given shape.

        Analogous to numpy.empty.

        Parameters
        ----------
        shape : int or tuple[int]

        Returns
        -------
        ExtensionArray
        """
        cls = self.construct_array_type()
        return cls._empty(shape, dtype=self)

    @classmethod
    def construct_from_string(cls, string: str) -> Self:
        r"""
        Construct this type from a string.

        This is useful mainly for data types that accept parameters.
        For example, a period dtype accepts a frequency parameter that
        can be set as ``period[h]`` (where h means hourly frequency).

        By default, in the abstract class, just the name of the type is
        expected. But subclasses can overwrite this method to accept
        parameters.

        Parameters
        ----------
        string : str
            The name of the type, for example ``category``.

        Returns
        -------
        ExtensionDtype
            Instance of the dtype.

        Raises
        ------
        TypeError
            If a class cannot be constructed from this 'string'.

        Examples
        --------
        For extension dtypes with arguments the following may be an
        adequate implementation.

        >>> import re
        >>> @classmethod
        ... def construct_from_string(cls, string):
        ...     pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
        ...     match = pattern.match(string)
        ...     if match:
        ...         return cls(**match.groupdict())
        ...     else:
        ...         raise TypeError(
        ...             f"Cannot construct a '{cls.__name__}' from '{string}'"
        ...         )
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        # error: Non-overlapping equality check (left operand type: "str", right
        # operand type: "Callable[[ExtensionDtype], str]")  [comparison-overlap]
        assert isinstance(cls.name, str), (cls, type(cls.name))
        if string != cls.name:
            raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
        return cls()

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        """
        Check if we match 'dtype'.

        Parameters
        ----------
        dtype : object
            The object to check.

        Returns
        -------
        bool

        Notes
        -----
        The default implementation is True if

        1. ``cls.construct_from_string(dtype)`` is an instance
           of ``cls``.
        2. ``dtype`` is an object and is an instance of ``cls``
        3. ``dtype`` has a ``dtype`` attribute, and any of the above
           conditions is true for ``dtype.dtype``.
        """
        dtype = getattr(dtype, "dtype", dtype)

        if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)):
            # https://github.com/pandas-dev/pandas/issues/22960
            # avoid passing data to `construct_from_string`. This could
            # cause a FutureWarning from numpy about failing elementwise
            # comparison from, e.g., comparing DataFrame == 'category'.
            return False
        elif dtype is None:
            return False
        elif isinstance(dtype, cls):
            return True
        if isinstance(dtype, str):
            try:
                return cls.construct_from_string(dtype) is not None
            except TypeError:
                return False
        return False

    @property
    def _is_numeric(self) -> bool:
        """
        Whether columns with this dtype should be considered numeric.

        By default ExtensionDtypes are assumed to be non-numeric.
        They'll be excluded from operations that exclude non-numeric
        columns, like (groupby) reductions, plotting, etc.
        """
        return False

    @property
    def _is_boolean(self) -> bool:
        """
        Whether this dtype should be considered boolean.

        By default, ExtensionDtypes are assumed to be non-boolean.
        Setting this to True will affect the behavior of several places,
        e.g.

        * is_bool
        * boolean indexing

        Returns
        -------
        bool
        """
        return False

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        """
        Return the common dtype, if one exists.

        Used in `find_common_type` implementation. This is for example used
        to determine the resulting dtype in a concat operation.

        If no common dtype exists, return None (which gives the other dtypes
        the chance to determine a common dtype). If all dtypes in the list
        return None, then the common dtype will be "object" dtype (this means
        it is never needed to return "object" dtype from this method itself).

        Parameters
        ----------
        dtypes : list of dtypes
            The dtypes for which to determine a common dtype. This is a list
            of np.dtype or ExtensionDtype instances.

        Returns
        -------
        Common dtype (np.dtype or ExtensionDtype) or None
        """
        if len(set(dtypes)) == 1:
            # only itself
            return self
        else:
            return None

    @property
    def _can_hold_na(self) -> bool:
        """
        Can arrays of this dtype hold NA values?
        """
        return True

    @property
    def _is_immutable(self) -> bool:
        """
        Can arrays with this dtype be modified with __setitem__? If not, return
        True.

        Immutable arrays are expected to raise TypeError on __setitem__ calls.
        """
        return False

    @cache_readonly
    def index_class(self) -> type_t[Index]:
        """
        The Index subclass to return from Index.__new__ when this dtype is
        encountered.
        """
        from pandas import Index

        return Index

    @property
    def _supports_2d(self) -> bool:
        """
        Do ExtensionArrays with this dtype support 2D arrays?

        Historically ExtensionArrays were limited to 1D. By returning True here,
        authors can indicate that their arrays support 2D instances. This can
        improve performance in some cases, particularly operations with `axis=1`.

        Arrays that support 2D values should:

        - implement Array.reshape
        - subclass the Dim2CompatTests in tests.extension.base
        - _concat_same_type should support `axis` keyword
        - _reduce and reductions should support `axis` keyword
        """
        return False

    @property
    def _can_fast_transpose(self) -> bool:
        """
        Is transposing an array with this dtype zero-copy?

        Only relevant for cases where _supports_2d is True.
        """
        return False

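# --- illustrative note (not part of the upstream file) ----------------------
# A minimal sketch of the abstract interface above. ``MoneyDtype`` and
# ``MoneyArray`` are hypothetical names, not pandas classes:
#
# >>> class MoneyDtype(ExtensionDtype):
# ...     name = "money"           # required: string identifying the dtype
# ...     type = float             # required: the scalar type of elements
# ...     _metadata = ()           # nothing parametrizes this dtype
# ...
# ...     @classmethod
# ...     def construct_array_type(cls):
# ...         return MoneyArray    # required: some ExtensionArray subclass
# -----------------------------------------------------------------------------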
class StorageExtensionDtype(ExtensionDtype):
    """ExtensionDtype that may be backed by more than one implementation."""

    name: str
    _metadata = ("storage",)

    def __init__(self, storage: str | None = None) -> None:
        self.storage = storage

    def __repr__(self) -> str:
        return f"{self.name}[{self.storage}]"

    def __str__(self) -> str:
        return self.name

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str) and other == self.name:
            return True
        return super().__eq__(other)

    def __hash__(self) -> int:
        # custom __eq__ so have to override __hash__
        return super().__hash__()

    @property
    def na_value(self) -> libmissing.NAType:
        return libmissing.NA

def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
    """
    Register an ExtensionType with pandas as class decorator.

    This enables operations like ``.astype(name)`` for the name
    of the ExtensionDtype.

    Returns
    -------
    callable
        A class decorator.

    Examples
    --------
    >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype
    >>> @register_extension_dtype
    ... class MyExtensionDtype(ExtensionDtype):
    ...     name = "myextension"
    """
    _registry.register(cls)
    return cls

class Registry:
    """
    Registry for dtype inference.

    The registry allows one to map a string repr of an extension
    dtype to an extension dtype. The string alias can be used in several
    places, including

    * Series and Index constructors
    * :meth:`pandas.array`
    * :meth:`pandas.Series.astype`

    Multiple extension types can be registered.
    These are tried in order.
    """

    def __init__(self) -> None:
        self.dtypes: list[type_t[ExtensionDtype]] = []

    def register(self, dtype: type_t[ExtensionDtype]) -> None:
        """
        Parameters
        ----------
        dtype : ExtensionDtype class
        """
        if not issubclass(dtype, ExtensionDtype):
            raise ValueError("can only register pandas extension dtypes")

        self.dtypes.append(dtype)

    @overload
    def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
        ...

    @overload
    def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT:
        ...

    @overload
    def find(self, dtype: str) -> ExtensionDtype | None:
        ...

    @overload
    def find(
        self, dtype: npt.DTypeLike
    ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
        ...

    def find(
        self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike
    ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
        """
        Parameters
        ----------
        dtype : ExtensionDtype class or instance or str or numpy dtype or python type

        Returns
        -------
        return the first matching dtype, otherwise return None
        """
        if not isinstance(dtype, str):
            dtype_type: type_t
            if not isinstance(dtype, type):
                dtype_type = type(dtype)
            else:
                dtype_type = dtype
            if issubclass(dtype_type, ExtensionDtype):
                # cast needed here as mypy doesn't know we have figured
                # out it is an ExtensionDtype or type_t[ExtensionDtype]
                return cast("ExtensionDtype | type_t[ExtensionDtype]", dtype)

            return None

        for dtype_type in self.dtypes:
            try:
                return dtype_type.construct_from_string(dtype)
            except TypeError:
                pass

        return None


_registry = Registry()
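# --- illustrative note (not part of the upstream file) ----------------------
# A sketch of string-alias lookup through the module-level registry
# (internal API; "Int64" is the alias registered by pandas' masked integer
# dtype in a normal install):
#
# >>> from pandas.core.dtypes.base import _registry
# >>> _registry.find("Int64")
# Int64Dtype()
# >>> _registry.find("not-a-dtype") is None
# True
# -----------------------------------------------------------------------------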
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/cast.py
ADDED
@@ -0,0 +1,1973 @@
"""
Routines for casting.
"""

from __future__ import annotations

import datetime as dt
import functools
from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    TypeVar,
    cast,
    overload,
)
import warnings

import numpy as np

from pandas._config import using_pyarrow_string_dtype

from pandas._libs import (
    Interval,
    Period,
    lib,
)
from pandas._libs.missing import (
    NA,
    NAType,
    checknull,
)
from pandas._libs.tslibs import (
    NaT,
    OutOfBoundsDatetime,
    OutOfBoundsTimedelta,
    Timedelta,
    Timestamp,
    is_supported_dtype,
)
from pandas._libs.tslibs.timedeltas import array_to_timedelta64
from pandas.compat.numpy import np_version_gt2
from pandas.errors import (
    IntCastingNaNError,
    LossySetitemError,
)

from pandas.core.dtypes.common import (
    ensure_int8,
    ensure_int16,
    ensure_int32,
    ensure_int64,
    ensure_object,
    ensure_str,
    is_bool,
    is_complex,
    is_float,
    is_integer,
    is_object_dtype,
    is_scalar,
    is_string_dtype,
    pandas_dtype as pandas_dtype_func,
)
from pandas.core.dtypes.dtypes import (
    ArrowDtype,
    BaseMaskedDtype,
    CategoricalDtype,
    DatetimeTZDtype,
    ExtensionDtype,
    IntervalDtype,
    PandasExtensionDtype,
    PeriodDtype,
)
from pandas.core.dtypes.generic import (
    ABCExtensionArray,
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
    is_valid_na_for_dtype,
    isna,
    na_value_for_dtype,
    notna,
)

from pandas.io._util import _arrow_dtype_mapping

if TYPE_CHECKING:
    from collections.abc import (
        Sequence,
        Sized,
    )

    from pandas._typing import (
        ArrayLike,
        Dtype,
        DtypeObj,
        NumpyIndexT,
        Scalar,
        npt,
    )

    from pandas import Index
    from pandas.core.arrays import (
        Categorical,
        DatetimeArray,
        ExtensionArray,
        IntervalArray,
        PeriodArray,
        TimedeltaArray,
    )

_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max

_dtype_obj = np.dtype(object)

NumpyArrayT = TypeVar("NumpyArrayT", bound=np.ndarray)


def maybe_convert_platform(
    values: list | tuple | range | np.ndarray | ExtensionArray,
) -> ArrayLike:
    """try to do platform conversion, allow ndarray or list here"""
    arr: ArrayLike

    if isinstance(values, (list, tuple, range)):
        arr = construct_1d_object_array_from_listlike(values)
    else:
        # The caller is responsible for ensuring that we have np.ndarray
        # or ExtensionArray here.
        arr = values

    if arr.dtype == _dtype_obj:
        arr = cast(np.ndarray, arr)
        arr = lib.maybe_convert_objects(arr)

    return arr

def is_nested_object(obj) -> bool:
    """
    return a boolean if we have a nested object, e.g. a Series with 1 or
    more Series elements

    This may not necessarily be performant.

    """
    return bool(
        isinstance(obj, ABCSeries)
        and is_object_dtype(obj.dtype)
        and any(isinstance(v, ABCSeries) for v in obj._values)
    )

def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar:
    """
    Cast scalar to Timestamp or Timedelta if scalar is datetime-like
    and dtype is not object.

    Parameters
    ----------
    value : scalar
    dtype : Dtype, optional

    Returns
    -------
    scalar
    """
    if dtype == _dtype_obj:
        pass
    elif isinstance(value, (np.datetime64, dt.datetime)):
        value = Timestamp(value)
    elif isinstance(value, (np.timedelta64, dt.timedelta)):
        value = Timedelta(value)

    return value

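# --- illustrative note (not part of the upstream file) ----------------------
# A sketch of the boxing behavior above (internal API); with dtype=object the
# value passes through unboxed:
#
# >>> import numpy as np
# >>> from pandas.core.dtypes.cast import maybe_box_datetimelike
# >>> maybe_box_datetimelike(np.datetime64("2021-01-01"))
# Timestamp('2021-01-01 00:00:00')
# -----------------------------------------------------------------------------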
def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType:
    """
    If passed a scalar cast the scalar to a python native type.

    Parameters
    ----------
    value : scalar or Series

    Returns
    -------
    scalar or Series
    """
    if is_float(value):
        value = float(value)
    elif is_integer(value):
        value = int(value)
    elif is_bool(value):
        value = bool(value)
    elif isinstance(value, (np.datetime64, np.timedelta64)):
        value = maybe_box_datetimelike(value)
    elif value is NA:
        value = None
    return value

def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
    """
    Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting
    into a numpy array. Failing to unbox would risk dropping nanoseconds.

    Notes
    -----
    Caller is responsible for checking dtype.kind in "mM"
    """
    if is_valid_na_for_dtype(value, dtype):
        # GH#36541: can't fill array directly with pd.NaT
        # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT)
        # ValueError: cannot convert float NaN to integer
        value = dtype.type("NaT", "ns")
    elif isinstance(value, Timestamp):
        if value.tz is None:
            value = value.to_datetime64()
        elif not isinstance(dtype, DatetimeTZDtype):
            raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype")
    elif isinstance(value, Timedelta):
        value = value.to_timedelta64()

    _disallow_mismatched_datetimelike(value, dtype)
    return value


def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
    """
    numpy allows np.array(dt64values, dtype="timedelta64[ns]") and
    vice-versa, but we do not want to allow this, so we need to
    check explicitly
    """
    vdtype = getattr(value, "dtype", None)
    if vdtype is None:
        return
    elif (vdtype.kind == "m" and dtype.kind == "M") or (
        vdtype.kind == "M" and dtype.kind == "m"
    ):
        raise TypeError(f"Cannot cast {repr(value)} to {dtype}")

@overload
def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
    ...


@overload
def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
    ...


def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
    """
    try to cast to the specified dtype (e.g. convert back to bool/int
    or could be an astype of float64->float32
    """
    if isinstance(result, ABCSeries):
        result = result._values
    do_round = False

    if isinstance(dtype, str):
        if dtype == "infer":
            inferred_type = lib.infer_dtype(result, skipna=False)
            if inferred_type == "boolean":
                dtype = "bool"
            elif inferred_type == "integer":
                dtype = "int64"
            elif inferred_type == "datetime64":
                dtype = "datetime64[ns]"
            elif inferred_type in ["timedelta", "timedelta64"]:
                dtype = "timedelta64[ns]"

            # try to upcast here
            elif inferred_type == "floating":
                dtype = "int64"
                if issubclass(result.dtype.type, np.number):
                    do_round = True

            else:
                # TODO: complex?  what if result is already non-object?
                dtype = "object"

        dtype = np.dtype(dtype)

    if not isinstance(dtype, np.dtype):
        # enforce our signature annotation
        raise TypeError(dtype)  # pragma: no cover

    converted = maybe_downcast_numeric(result, dtype, do_round)
    if converted is not result:
        return converted

    # a datetimelike
    # GH12821, iNaT is cast to float
    if dtype.kind in "mM" and result.dtype.kind in "if":
        result = result.astype(dtype)

    elif dtype.kind == "m" and result.dtype == _dtype_obj:
        # test_where_downcast_to_td64
        result = cast(np.ndarray, result)
        result = array_to_timedelta64(result)

    elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj:
        result = cast(np.ndarray, result)
        return np.asarray(maybe_cast_to_datetime(result, dtype=dtype))

    return result

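# --- illustrative note (not part of the upstream file) ----------------------
# A sketch of the "infer" path above (internal API): float results that
# round-trip losslessly are pulled back down to int64, others are left alone.
#
# >>> import numpy as np
# >>> from pandas.core.dtypes.cast import maybe_downcast_to_dtype
# >>> maybe_downcast_to_dtype(np.array([1.0, 2.0]), "infer")
# array([1, 2])
# >>> maybe_downcast_to_dtype(np.array([1.0, 2.5]), "infer")
# array([1. , 2.5])
# -----------------------------------------------------------------------------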
@overload
def maybe_downcast_numeric(
    result: np.ndarray, dtype: np.dtype, do_round: bool = False
) -> np.ndarray:
    ...


@overload
def maybe_downcast_numeric(
    result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
) -> ArrayLike:
    ...


def maybe_downcast_numeric(
    result: ArrayLike, dtype: DtypeObj, do_round: bool = False
) -> ArrayLike:
    """
    Subset of maybe_downcast_to_dtype restricted to numeric dtypes.

    Parameters
    ----------
    result : ndarray or ExtensionArray
    dtype : np.dtype or ExtensionDtype
    do_round : bool

    Returns
    -------
    ndarray or ExtensionArray
    """
    if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype):
        # e.g. SparseDtype has no itemsize attr
        return result

    def trans(x):
        if do_round:
            return x.round()
        return x

    if dtype.kind == result.dtype.kind:
        # don't allow upcasts here (except if empty)
        if result.dtype.itemsize <= dtype.itemsize and result.size:
            return result

    if dtype.kind in "biu":
        if not result.size:
            # if we don't have any elements, just astype it
            return trans(result).astype(dtype)

        if isinstance(result, np.ndarray):
            element = result.item(0)
        else:
            element = result.iloc[0]
        if not isinstance(element, (np.integer, np.floating, int, float, bool)):
            # a comparable, e.g. a Decimal may slip in here
            return result

        if (
            issubclass(result.dtype.type, (np.object_, np.number))
            and notna(result).all()
        ):
            new_result = trans(result).astype(dtype)
            if new_result.dtype.kind == "O" or result.dtype.kind == "O":
                # np.allclose may raise TypeError on object-dtype
                if (new_result == result).all():
                    return new_result
            else:
                if np.allclose(new_result, result, rtol=0):
                    return new_result

    elif (
        issubclass(dtype.type, np.floating)
        and result.dtype.kind != "b"
        and not is_string_dtype(result.dtype)
    ):
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "overflow encountered in cast", RuntimeWarning
            )
            new_result = result.astype(dtype)

        # Adjust tolerances based on floating point size
        size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16}

        atol = size_tols.get(new_result.dtype.itemsize, 0.0)

        # Check downcast float values are still equal within 7 digits when
        # converting from float64 to float32
        if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol):
            return new_result

    elif dtype.kind == result.dtype.kind == "c":
        new_result = result.astype(dtype)

        if np.array_equal(new_result, result, equal_nan=True):
            # TODO: use tolerance like we do for float?
            return new_result

    return result

def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT:
    """
    If the array is an int/uint/float with a bit size lower than 64 bits,
    upcast it to 64 bits.

    Parameters
    ----------
    arr : ndarray or ExtensionArray

    Returns
    -------
    ndarray or ExtensionArray
    """
    dtype = arr.dtype
    if dtype.kind == "i" and dtype != np.int64:
        return arr.astype(np.int64)
    elif dtype.kind == "u" and dtype != np.uint64:
        return arr.astype(np.uint64)
    elif dtype.kind == "f" and dtype != np.float64:
        return arr.astype(np.float64)
    else:
        return arr

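# --- illustrative note (not part of the upstream file) ----------------------
# A sketch of the 64-bit upcast (internal API):
#
# >>> import numpy as np
# >>> from pandas.core.dtypes.cast import maybe_upcast_numeric_to_64bit
# >>> maybe_upcast_numeric_to_64bit(np.array([1, 2], dtype=np.int32)).dtype
# dtype('int64')
# -----------------------------------------------------------------------------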
def maybe_cast_pointwise_result(
    result: ArrayLike,
    dtype: DtypeObj,
    numeric_only: bool = False,
    same_dtype: bool = True,
) -> ArrayLike:
    """
    Try casting result of a pointwise operation back to the original dtype if
    appropriate.

    Parameters
    ----------
    result : array-like
        Result to cast.
    dtype : np.dtype or ExtensionDtype
        Input Series from which result was calculated.
    numeric_only : bool, default False
        Whether to cast only numerics or datetimes as well.
    same_dtype : bool, default True
        Specify dtype when calling _from_sequence

    Returns
    -------
    result : array-like
        result maybe casted to the dtype.
    """

    if isinstance(dtype, ExtensionDtype):
        cls = dtype.construct_array_type()
        if same_dtype:
            result = _maybe_cast_to_extension_array(cls, result, dtype=dtype)
        else:
            result = _maybe_cast_to_extension_array(cls, result)

    elif (numeric_only and dtype.kind in "iufcb") or not numeric_only:
        result = maybe_downcast_to_dtype(result, dtype)

    return result

def _maybe_cast_to_extension_array(
    cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None
) -> ArrayLike:
    """
    Call to `_from_sequence` that returns the object unchanged on Exception.

    Parameters
    ----------
    cls : class, subclass of ExtensionArray
    obj : arraylike
        Values to pass to cls._from_sequence
    dtype : ExtensionDtype, optional

    Returns
    -------
    ExtensionArray or obj
    """
    result: ArrayLike

    if dtype is not None:
        try:
            result = cls._from_scalars(obj, dtype=dtype)
        except (TypeError, ValueError):
            return obj
        return result

    try:
        result = cls._from_sequence(obj, dtype=dtype)
    except Exception:
        # We can't predict what downstream EA constructors may raise
        result = obj
    return result

@overload
def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype:
    ...


@overload
def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype:
    ...


def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:
    """
    If we have a dtype that cannot hold NA values, find the best match that can.
    """
    if isinstance(dtype, ExtensionDtype):
        if dtype._can_hold_na:
            return dtype
        elif isinstance(dtype, IntervalDtype):
            # TODO(GH#45349): don't special-case IntervalDtype, allow
            # overriding instead of returning object below.
            return IntervalDtype(np.float64, closed=dtype.closed)
        return _dtype_obj
    elif dtype.kind == "b":
        return _dtype_obj
    elif dtype.kind in "iu":
        return np.dtype(np.float64)
    return dtype

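# --- illustrative note (not part of the upstream file) ----------------------
# A sketch of the NA-capable promotion (internal API): integers widen to
# float64, booleans fall back to object.
#
# >>> import numpy as np
# >>> from pandas.core.dtypes.cast import ensure_dtype_can_hold_na
# >>> ensure_dtype_can_hold_na(np.dtype("int64"))
# dtype('float64')
# >>> ensure_dtype_can_hold_na(np.dtype("bool"))
# dtype('O')
# -----------------------------------------------------------------------------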
_canonical_nans = {
    np.datetime64: np.datetime64("NaT", "ns"),
    np.timedelta64: np.timedelta64("NaT", "ns"),
    type(np.nan): np.nan,
}


def maybe_promote(dtype: np.dtype, fill_value=np.nan):
    """
    Find the minimal dtype that can hold both the given dtype and fill_value.

    Parameters
    ----------
    dtype : np.dtype
    fill_value : scalar, default np.nan

    Returns
    -------
    dtype
        Upcasted from dtype argument if necessary.
    fill_value
        Upcasted from fill_value argument if necessary.

    Raises
    ------
    ValueError
        If fill_value is a non-scalar and dtype is not object.
    """
    orig = fill_value
    orig_is_nat = False
    if checknull(fill_value):
        # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740
        # avoid cache misses with NaN/NaT values that are not singletons
        if fill_value is not NA:
            try:
                orig_is_nat = np.isnat(fill_value)
            except TypeError:
                pass

        fill_value = _canonical_nans.get(type(fill_value), fill_value)

    # for performance, we are using a cached version of the actual implementation
    # of the function in _maybe_promote. However, this doesn't always work (in case
    # of non-hashable arguments), so we fallback to the actual implementation if needed
    try:
        # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type
        # "Type[Any]"; expected "Hashable"  [arg-type]
        dtype, fill_value = _maybe_promote_cached(
            dtype, fill_value, type(fill_value)  # type: ignore[arg-type]
        )
    except TypeError:
        # if fill_value is not hashable (required for caching)
        dtype, fill_value = _maybe_promote(dtype, fill_value)

    if (dtype == _dtype_obj and orig is not None) or (
        orig_is_nat and np.datetime_data(orig)[0] != "ns"
    ):
        # GH#51592,53497 restore our potentially non-canonical fill_value
        fill_value = orig
    return dtype, fill_value

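# --- illustrative note (not part of the upstream file) ----------------------
# A sketch of the promotion rules (internal API): NaN forces integers to
# float, and an out-of-range integer fill widens the dtype.
#
# >>> import numpy as np
# >>> from pandas.core.dtypes.cast import maybe_promote
# >>> maybe_promote(np.dtype("int8"), fill_value=np.nan)
# (dtype('float64'), nan)
# >>> maybe_promote(np.dtype("uint8"), fill_value=1000)[0]
# dtype('uint16')
# -----------------------------------------------------------------------------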
@functools.lru_cache
|
607 |
+
def _maybe_promote_cached(dtype, fill_value, fill_value_type):
|
608 |
+
# The cached version of _maybe_promote below
|
609 |
+
# This also use fill_value_type as (unused) argument to use this in the
|
610 |
+
# cache lookup -> to differentiate 1 and True
|
611 |
+
return _maybe_promote(dtype, fill_value)
|
612 |
+
|
613 |
+
|
614 |
+
def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
    # The actual implementation of the function, use `maybe_promote` above for
    # a cached version.
    if not is_scalar(fill_value):
        # with object dtype there is nothing to promote, and the user can
        # pass pretty much any weird fill_value they like
        if dtype != object:
            raise ValueError("fill_value must be a scalar")
        dtype = _dtype_obj
        return dtype, fill_value

    if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM":
        dtype = ensure_dtype_can_hold_na(dtype)
        fv = na_value_for_dtype(dtype)
        return dtype, fv

    elif isinstance(dtype, CategoricalDtype):
        if fill_value in dtype.categories or isna(fill_value):
            return dtype, fill_value
        else:
            return object, ensure_object(fill_value)

    elif isna(fill_value):
        dtype = _dtype_obj
        if fill_value is None:
            # but we retain e.g. pd.NA
            fill_value = np.nan
        return dtype, fill_value

    # returns tuple of (dtype, fill_value)
    if issubclass(dtype.type, np.datetime64):
        inferred, fv = infer_dtype_from_scalar(fill_value)
        if inferred == dtype:
            return dtype, fv

        from pandas.core.arrays import DatetimeArray

        dta = DatetimeArray._from_sequence([], dtype="M8[ns]")
        try:
            fv = dta._validate_setitem_value(fill_value)
            return dta.dtype, fv
        except (ValueError, TypeError):
            return _dtype_obj, fill_value

    elif issubclass(dtype.type, np.timedelta64):
        inferred, fv = infer_dtype_from_scalar(fill_value)
        if inferred == dtype:
            return dtype, fv

        elif inferred.kind == "m":
            # different unit, e.g. passed np.timedelta64(24, "h") with dtype=m8[ns]
            # see if we can losslessly cast it to our dtype
            unit = np.datetime_data(dtype)[0]
            try:
                td = Timedelta(fill_value).as_unit(unit, round_ok=False)
            except OutOfBoundsTimedelta:
                return _dtype_obj, fill_value
            else:
                return dtype, td.asm8

        return _dtype_obj, fill_value

    elif is_float(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, np.integer):
            dtype = np.dtype(np.float64)

        elif dtype.kind == "f":
            mst = np.min_scalar_type(fill_value)
            if mst > dtype:
                # e.g. mst is np.float64 and dtype is np.float32
                dtype = mst

        elif dtype.kind == "c":
            mst = np.min_scalar_type(fill_value)
            dtype = np.promote_types(dtype, mst)

    elif is_bool(fill_value):
        if not issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

    elif is_integer(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, np.integer):
            if not np_can_cast_scalar(fill_value, dtype):  # type: ignore[arg-type]
                # upcast to prevent overflow
                mst = np.min_scalar_type(fill_value)
                dtype = np.promote_types(dtype, mst)
                if dtype.kind == "f":
                    # Case where we disagree with numpy
                    dtype = np.dtype(np.object_)

    elif is_complex(fill_value):
        if issubclass(dtype.type, np.bool_):
            dtype = np.dtype(np.object_)

        elif issubclass(dtype.type, (np.integer, np.floating)):
            mst = np.min_scalar_type(fill_value)
            dtype = np.promote_types(dtype, mst)

        elif dtype.kind == "c":
            mst = np.min_scalar_type(fill_value)
            if mst > dtype:
                # e.g. mst is np.complex128 and dtype is np.complex64
                dtype = mst

    else:
        dtype = np.dtype(np.object_)

    # in case we have a string that looked like a number
    if issubclass(dtype.type, (bytes, str)):
        dtype = np.dtype(np.object_)

    fill_value = _ensure_dtype_type(fill_value, dtype)
    return dtype, fill_value


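# A minimal illustrative sketch of the promotion rules above (assuming default
# numpy promotion): an integer fill_value that overflows the dtype upcasts via
# np.min_scalar_type, while a compatible one leaves the dtype unchanged:
#
#   >>> maybe_promote(np.dtype(np.uint8), 300)
#   (dtype('uint16'), 300)
#   >>> maybe_promote(np.dtype(np.int64), 7)
#   (dtype('int64'), 7)
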
def _ensure_dtype_type(value, dtype: np.dtype):
    """
    Ensure that the given value is an instance of the given dtype.

    e.g. if our dtype is np.complex64, we should have an instance of that
    as opposed to a python complex object.

    Parameters
    ----------
    value : object
    dtype : np.dtype

    Returns
    -------
    object
    """
    # Start with exceptions in which we do _not_ cast to numpy types

    if dtype == _dtype_obj:
        return value

    # Note: before we get here we have already excluded isna(value)
    return dtype.type(value)


def infer_dtype_from(val) -> tuple[DtypeObj, Any]:
    """
    Interpret the dtype from a scalar or array.

    Parameters
    ----------
    val : object
    """
    if not is_list_like(val):
        return infer_dtype_from_scalar(val)
    return infer_dtype_from_array(val)


def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]:
    """
    Interpret the dtype from a scalar.

    Parameters
    ----------
    val : object
    """
    dtype: DtypeObj = _dtype_obj

    # a 1-element ndarray
    if isinstance(val, np.ndarray):
        if val.ndim != 0:
            msg = "invalid ndarray passed to infer_dtype_from_scalar"
            raise ValueError(msg)

        dtype = val.dtype
        val = lib.item_from_zerodim(val)

    elif isinstance(val, str):
        # If we create an empty array using a string to infer
        # the dtype, NumPy will only allocate one character per entry
        # so this is kind of bad. Alternately we could use np.repeat
        # instead of np.empty (but then you still don't want things
        # coming out as np.str_!)

        dtype = _dtype_obj
        if using_pyarrow_string_dtype():
            from pandas.core.arrays.string_ import StringDtype

            dtype = StringDtype(storage="pyarrow_numpy")

    elif isinstance(val, (np.datetime64, dt.datetime)):
        try:
            val = Timestamp(val)
        except OutOfBoundsDatetime:
            return _dtype_obj, val

        if val is NaT or val.tz is None:
            val = val.to_datetime64()
            dtype = val.dtype
            # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes
        else:
            dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz)

    elif isinstance(val, (np.timedelta64, dt.timedelta)):
        try:
            val = Timedelta(val)
        except (OutOfBoundsTimedelta, OverflowError):
            dtype = _dtype_obj
        else:
            if val is NaT:
                val = np.timedelta64("NaT", "ns")
            else:
                val = val.asm8
            dtype = val.dtype

    elif is_bool(val):
        dtype = np.dtype(np.bool_)

    elif is_integer(val):
        if isinstance(val, np.integer):
            dtype = np.dtype(type(val))
        else:
            dtype = np.dtype(np.int64)

        try:
            np.array(val, dtype=dtype)
        except OverflowError:
            dtype = np.array(val).dtype

    elif is_float(val):
        if isinstance(val, np.floating):
            dtype = np.dtype(type(val))
        else:
            dtype = np.dtype(np.float64)

    elif is_complex(val):
        dtype = np.dtype(np.complex128)

    if isinstance(val, Period):
        dtype = PeriodDtype(freq=val.freq)
    elif isinstance(val, Interval):
        subtype = infer_dtype_from_scalar(val.left)[0]
        dtype = IntervalDtype(subtype=subtype, closed=val.closed)

    return dtype, val


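# A minimal illustrative sketch of scalar inference (assuming the default
# numpy int/float widths on a 64-bit platform):
#
#   >>> infer_dtype_from_scalar(1)
#   (dtype('int64'), 1)
#   >>> infer_dtype_from_scalar(3.5)
#   (dtype('float64'), 3.5)
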
def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]:
    """
    Convert datetimelike-keyed dicts to a Timestamp-keyed dict.

    Parameters
    ----------
    d : dict-like object

    Returns
    -------
    dict
    """
    return {maybe_box_datetimelike(key): value for key, value in d.items()}


def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]:
    """
    Infer the dtype from an array.

    Parameters
    ----------
    arr : array

    Returns
    -------
    tuple (pandas-compat dtype, array)

    Examples
    --------
    >>> np.asarray([1, '1'])
    array(['1', '1'], dtype='<U21')

    >>> infer_dtype_from_array([1, '1'])
    (dtype('O'), [1, '1'])
    """
    if isinstance(arr, np.ndarray):
        return arr.dtype, arr

    if not is_list_like(arr):
        raise TypeError("'arr' must be list-like")

    arr_dtype = getattr(arr, "dtype", None)
    if isinstance(arr_dtype, ExtensionDtype):
        return arr.dtype, arr

    elif isinstance(arr, ABCSeries):
        return arr.dtype, np.asarray(arr)

    # don't force numpy coerce with nan's
    inferred = lib.infer_dtype(arr, skipna=False)
    if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
        return (np.dtype(np.object_), arr)

    arr = np.asarray(arr)
    return arr.dtype, arr


def _maybe_infer_dtype_type(element):
    """
    Try to infer an object's dtype, for use in arithmetic ops.

    Uses `element.dtype` if that's available.
    Objects implementing the iterator protocol are cast to a NumPy array,
    and from there the array's type is used.

    Parameters
    ----------
    element : object
        Possibly has a `.dtype` attribute, and possibly the iterator
        protocol.

    Returns
    -------
    tipo : type

    Examples
    --------
    >>> from collections import namedtuple
    >>> Foo = namedtuple("Foo", "dtype")
    >>> _maybe_infer_dtype_type(Foo(np.dtype("i8")))
    dtype('int64')
    """
    tipo = None
    if hasattr(element, "dtype"):
        tipo = element.dtype
    elif is_list_like(element):
        element = np.asarray(element)
        tipo = element.dtype
    return tipo


def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None:
    """
    Change string-like dtypes to object for
    ``DataFrame.select_dtypes()``.
    """
    # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected
    # "Union[dtype[Any], ExtensionDtype, None]"
    # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected
    # "Union[dtype[Any], ExtensionDtype, None]"
    non_string_dtypes = dtype_set - {
        np.dtype("S").type,  # type: ignore[arg-type]
        np.dtype("<U").type,  # type: ignore[arg-type]
    }
    if non_string_dtypes != dtype_set:
        raise TypeError("string dtypes are not allowed, use 'object' instead")


def coerce_indexer_dtype(indexer, categories) -> np.ndarray:
    """coerce the indexer input array to the smallest dtype possible"""
    length = len(categories)
    if length < _int8_max:
        return ensure_int8(indexer)
    elif length < _int16_max:
        return ensure_int16(indexer)
    elif length < _int32_max:
        return ensure_int32(indexer)
    return ensure_int64(indexer)


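# A minimal illustrative sketch (assuming _int8_max etc. are the numpy integer
# maxima defined earlier in this module): three categories fit comfortably in
# int8, so the codes are downcast:
#
#   >>> coerce_indexer_dtype(np.array([0, 1, 2, 0]), categories=["a", "b", "c"])
#   array([0, 1, 2, 0], dtype=int8)
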
def convert_dtypes(
    input_array: ArrayLike,
    convert_string: bool = True,
    convert_integer: bool = True,
    convert_boolean: bool = True,
    convert_floating: bool = True,
    infer_objects: bool = False,
    dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable",
) -> DtypeObj:
    """
    Convert objects to best possible type, and optionally,
    to types supporting ``pd.NA``.

    Parameters
    ----------
    input_array : ExtensionArray or np.ndarray
    convert_string : bool, default True
        Whether object dtypes should be converted to ``StringDtype()``.
    convert_integer : bool, default True
        Whether, if possible, conversion can be done to integer extension types.
    convert_boolean : bool, default True
        Whether object dtypes should be converted to ``BooleanDtype()``.
    convert_floating : bool, default True
        Whether, if possible, conversion can be done to floating extension types.
        If `convert_integer` is also True, preference will be given to integer
        dtypes if the floats can be faithfully cast to integers.
    infer_objects : bool, default False
        Whether to also infer objects to float/int if possible. Is only hit if the
        object array contains pd.NA.
    dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental). Behaviour is as follows:

        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
          (default).
        * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
          DataFrame.

        .. versionadded:: 2.0

    Returns
    -------
    np.dtype, or ExtensionDtype
    """
    inferred_dtype: str | DtypeObj

    if (
        convert_string or convert_integer or convert_boolean or convert_floating
    ) and isinstance(input_array, np.ndarray):
        if input_array.dtype == object:
            inferred_dtype = lib.infer_dtype(input_array)
        else:
            inferred_dtype = input_array.dtype

        if is_string_dtype(inferred_dtype):
            if not convert_string or inferred_dtype == "bytes":
                inferred_dtype = input_array.dtype
            else:
                inferred_dtype = pandas_dtype_func("string")

        if convert_integer:
            target_int_dtype = pandas_dtype_func("Int64")

            if input_array.dtype.kind in "iu":
                from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE

                inferred_dtype = NUMPY_INT_TO_DTYPE.get(
                    input_array.dtype, target_int_dtype
                )
            elif input_array.dtype.kind in "fcb":
                # TODO: de-dup with maybe_cast_to_integer_array?
                arr = input_array[notna(input_array)]
                if (arr.astype(int) == arr).all():
                    inferred_dtype = target_int_dtype
                else:
                    inferred_dtype = input_array.dtype
            elif (
                infer_objects
                and input_array.dtype == object
                and (isinstance(inferred_dtype, str) and inferred_dtype == "integer")
            ):
                inferred_dtype = target_int_dtype

        if convert_floating:
            if input_array.dtype.kind in "fcb":
                # i.e. numeric but not integer
                from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE

                inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get(
                    input_array.dtype, pandas_dtype_func("Float64")
                )
                # if we could also convert to integer, check if all floats
                # are actually integers
                if convert_integer:
                    # TODO: de-dup with maybe_cast_to_integer_array?
                    arr = input_array[notna(input_array)]
                    if (arr.astype(int) == arr).all():
                        inferred_dtype = pandas_dtype_func("Int64")
                    else:
                        inferred_dtype = inferred_float_dtype
                else:
                    inferred_dtype = inferred_float_dtype
            elif (
                infer_objects
                and input_array.dtype == object
                and (
                    isinstance(inferred_dtype, str)
                    and inferred_dtype == "mixed-integer-float"
                )
            ):
                inferred_dtype = pandas_dtype_func("Float64")

        if convert_boolean:
            if input_array.dtype.kind == "b":
                inferred_dtype = pandas_dtype_func("boolean")
            elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
                inferred_dtype = pandas_dtype_func("boolean")

        if isinstance(inferred_dtype, str):
            # If we couldn't do anything else, then we retain the dtype
            inferred_dtype = input_array.dtype

    else:
        inferred_dtype = input_array.dtype

    if dtype_backend == "pyarrow":
        from pandas.core.arrays.arrow.array import to_pyarrow_type
        from pandas.core.arrays.string_ import StringDtype

        assert not isinstance(inferred_dtype, str)

        if (
            (convert_integer and inferred_dtype.kind in "iu")
            or (convert_floating and inferred_dtype.kind in "fc")
            or (convert_boolean and inferred_dtype.kind == "b")
            or (convert_string and isinstance(inferred_dtype, StringDtype))
            or (
                inferred_dtype.kind not in "iufcb"
                and not isinstance(inferred_dtype, StringDtype)
            )
        ):
            if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance(
                inferred_dtype, DatetimeTZDtype
            ):
                base_dtype = inferred_dtype.base
            elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)):
                base_dtype = inferred_dtype.numpy_dtype
            elif isinstance(inferred_dtype, StringDtype):
                base_dtype = np.dtype(str)
            else:
                base_dtype = inferred_dtype
            if (
                base_dtype.kind == "O"  # type: ignore[union-attr]
                and input_array.size > 0
                and isna(input_array).all()
            ):
                import pyarrow as pa

                pa_type = pa.null()
            else:
                pa_type = to_pyarrow_type(base_dtype)
            if pa_type is not None:
                inferred_dtype = ArrowDtype(pa_type)
    elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype):
        # GH 53648
        inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype]

    # error: Incompatible return value type (got "Union[str, Union[dtype[Any],
    # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]")
    return inferred_dtype  # type: ignore[return-value]


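# A minimal illustrative sketch (assuming the numpy_nullable backend and the
# nullable Int64 extension dtype): integer arrays and integral-float arrays
# both resolve to the nullable Int64 dtype:
#
#   >>> convert_dtypes(np.array([1, 2, 3]))
#   Int64Dtype()
#   >>> convert_dtypes(np.array([1.0, 2.0]))
#   Int64Dtype()
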
def maybe_infer_to_datetimelike(
    value: npt.NDArray[np.object_],
) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray:
    """
    we might have an array (or single object) that is datetime like,
    and no dtype is passed; don't change the value unless we find a
    datetime/timedelta set

    this is pretty strict in that a datetime/timedelta is REQUIRED
    in addition to possible nulls/string likes

    Parameters
    ----------
    value : np.ndarray[object]

    Returns
    -------
    np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray
    """
    if not isinstance(value, np.ndarray) or value.dtype != object:
        # Caller is responsible for passing only ndarray[object]
        raise TypeError(type(value))  # pragma: no cover
    if value.ndim != 1:
        # Caller is responsible
        raise ValueError(value.ndim)  # pragma: no cover

    if not len(value):
        return value

    # error: Incompatible return value type (got "Union[ExtensionArray,
    # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray,
    # TimedeltaArray, PeriodArray, IntervalArray]")
    return lib.maybe_convert_objects(  # type: ignore[return-value]
        value,
        # Here we do not convert numeric dtypes, as if we wanted that,
        # numpy would have done it for us.
        convert_numeric=False,
        convert_non_numeric=True,
        dtype_if_all_nat=np.dtype("M8[ns]"),
    )


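# A minimal illustrative sketch (assuming lib.maybe_convert_objects recognizes
# a homogeneous set of datetimes): an object array of Timestamps comes back as
# a datetime64-backed array, while mixed non-datetime junk is returned
# unchanged. Hypothetical usage:
#
#   >>> vals = np.array([Timestamp("2020-01-01"), Timestamp("2020-01-02")], dtype=object)
#   >>> maybe_infer_to_datetimelike(vals).dtype.kind  # "M" for datetime64
#   'M'
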
def maybe_cast_to_datetime(
    value: np.ndarray | list, dtype: np.dtype
) -> ExtensionArray | np.ndarray:
    """
    try to cast the array/value to a datetimelike dtype, converting float
    nan to iNaT

    Caller is responsible for handling ExtensionDtype cases and non dt64/td64
    cases.
    """
    from pandas.core.arrays.datetimes import DatetimeArray
    from pandas.core.arrays.timedeltas import TimedeltaArray

    assert dtype.kind in "mM"
    if not is_list_like(value):
        raise TypeError("value must be listlike")

    # TODO: _from_sequence would raise ValueError in cases where
    #  _ensure_nanosecond_dtype raises TypeError
    _ensure_nanosecond_dtype(dtype)

    if lib.is_np_dtype(dtype, "m"):
        res = TimedeltaArray._from_sequence(value, dtype=dtype)
        return res
    else:
        try:
            dta = DatetimeArray._from_sequence(value, dtype=dtype)
        except ValueError as err:
            # We can give a Series-specific exception message.
            if "cannot supply both a tz and a timezone-naive dtype" in str(err):
                raise ValueError(
                    "Cannot convert timezone-aware data to "
                    "timezone-naive dtype. Use "
                    "pd.Series(values).dt.tz_localize(None) instead."
                ) from err
            raise

        return dta


def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None:
    """
    Convert dtypes with granularity less than nanosecond to nanosecond

    >>> _ensure_nanosecond_dtype(np.dtype("M8[us]"))

    >>> _ensure_nanosecond_dtype(np.dtype("M8[D]"))
    Traceback (most recent call last):
        ...
    TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'

    >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]"))
    Traceback (most recent call last):
        ...
    TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'
    """  # noqa: E501
    msg = (
        f"The '{dtype.name}' dtype has no unit. "
        f"Please pass in '{dtype.name}[ns]' instead."
    )

    # unpack e.g. SparseDtype
    dtype = getattr(dtype, "subtype", dtype)

    if not isinstance(dtype, np.dtype):
        # i.e. datetime64tz
        pass

    elif dtype.kind in "mM":
        if not is_supported_dtype(dtype):
            # pre-2.0 we would silently swap in nanos for lower-resolutions,
            #  raise for above-nano resolutions
            if dtype.name in ["datetime64", "timedelta64"]:
                raise ValueError(msg)
            # TODO: ValueError or TypeError? existing test
            #  test_constructor_generic_timestamp_bad_frequency expects TypeError
            raise TypeError(
                f"dtype={dtype} is not supported. Supported resolutions are 's', "
                "'ms', 'us', and 'ns'"
            )


# TODO: other value-dependent functions to standardize here include
#  Index._find_common_type_compat
def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj:
    """
    Find the type/dtype for the result of an operation between objects.

    This is similar to find_common_type, but looks at the right object instead
    of just its dtype. This can be useful in particular when the right
    object does not have a `dtype`.

    Parameters
    ----------
    left_dtype : np.dtype or ExtensionDtype
    right : Any

    Returns
    -------
    np.dtype or ExtensionDtype

    See also
    --------
    find_common_type
    numpy.result_type
    """
    new_dtype: DtypeObj

    if (
        isinstance(left_dtype, np.dtype)
        and left_dtype.kind in "iuc"
        and (lib.is_integer(right) or lib.is_float(right))
    ):
        # e.g. with int8 dtype and right=512, we want to end up with
        # np.int16, whereas infer_dtype_from(512) gives np.int64,
        #  which will make us upcast too far.
        if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f":
            right = int(right)
        # After NEP 50, numpy won't inspect Python scalars
        # TODO: do we need to recreate numpy's inspection logic for floats too
        # (this breaks some tests)
        if isinstance(right, int) and not isinstance(right, np.integer):
            # This gives an unsigned type by default
            # (if our number is positive)

            # If our left dtype is signed, we might not want this since
            # this might give us 1 dtype too big
            # We should check if the corresponding int dtype (e.g. int64 for uint64)
            # can hold the number
            right_dtype = np.min_scalar_type(right)
            if right == 0:
                # Special case 0
                right = left_dtype
            elif (
                not np.issubdtype(left_dtype, np.unsignedinteger)
                and 0 < right <= np.iinfo(right_dtype).max
            ):
                # If left dtype isn't unsigned, check if it fits in the signed dtype
                right = np.dtype(f"i{right_dtype.itemsize}")
            else:
                right = right_dtype

        new_dtype = np.result_type(left_dtype, right)

    elif is_valid_na_for_dtype(right, left_dtype):
        # e.g. IntervalDtype[int] and None/np.nan
        new_dtype = ensure_dtype_can_hold_na(left_dtype)

    else:
        dtype, _ = infer_dtype_from(right)
        new_dtype = find_common_type([left_dtype, dtype])

    return new_dtype


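# A minimal illustrative sketch (assuming the NEP 50-style scalar handling
# implemented above): a Python int that overflows the left dtype picks the
# smallest signed dtype that can hold it, rather than jumping to int64:
#
#   >>> find_result_type(np.dtype(np.int8), 512)
#   dtype('int16')
#   >>> find_result_type(np.dtype(np.int8), 4)
#   dtype('int8')
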
def common_dtype_categorical_compat(
    objs: Sequence[Index | ArrayLike], dtype: DtypeObj
) -> DtypeObj:
    """
    Update the result of find_common_type to account for NAs in a Categorical.

    Parameters
    ----------
    objs : list[np.ndarray | ExtensionArray | Index]
    dtype : np.dtype or ExtensionDtype

    Returns
    -------
    np.dtype or ExtensionDtype
    """
    # GH#38240

    # TODO: more generally, could do `not can_hold_na(dtype)`
    if lib.is_np_dtype(dtype, "iu"):
        for obj in objs:
            # We don't want to accidentally allow e.g. "categorical" str here
            obj_dtype = getattr(obj, "dtype", None)
            if isinstance(obj_dtype, CategoricalDtype):
                if isinstance(obj, ABCIndex):
                    # This check may already be cached
                    hasnas = obj.hasnans
                else:
                    # Categorical
                    hasnas = cast("Categorical", obj)._hasna

                if hasnas:
                    # see test_union_int_categorical_with_nan
                    dtype = np.dtype(np.float64)
                    break
    return dtype


def np_find_common_type(*dtypes: np.dtype) -> np.dtype:
    """
    np.find_common_type implementation pre-1.25 deprecation using np.result_type
    https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065

    Parameters
    ----------
    dtypes : np.dtypes

    Returns
    -------
    np.dtype
    """
    try:
        common_dtype = np.result_type(*dtypes)
        if common_dtype.kind in "mMSU":
            # NumPy promotion currently (1.25) misbehaves for times and strings,
            # so fall back to object (find_common_dtype did unless there
            # was only one dtype)
            common_dtype = np.dtype("O")

    except TypeError:
        common_dtype = np.dtype("O")
    return common_dtype


@overload
def find_common_type(types: list[np.dtype]) -> np.dtype:
    ...


@overload
def find_common_type(types: list[ExtensionDtype]) -> DtypeObj:
    ...


@overload
def find_common_type(types: list[DtypeObj]) -> DtypeObj:
    ...


def find_common_type(types):
    """
    Find a common data type among the given dtypes.

    Parameters
    ----------
    types : list of dtypes

    Returns
    -------
    pandas extension or numpy dtype

    See Also
    --------
    numpy.find_common_type

    """
    if not types:
        raise ValueError("no types given")

    first = types[0]

    # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
    # => object
    if lib.dtypes_all_equal(list(types)):
        return first

    # get unique types (dict.fromkeys is used as order-preserving set())
    types = list(dict.fromkeys(types).keys())

    if any(isinstance(t, ExtensionDtype) for t in types):
        for t in types:
            if isinstance(t, ExtensionDtype):
                res = t._get_common_dtype(types)
                if res is not None:
                    return res
        return np.dtype("object")

    # take lowest unit
    if all(lib.is_np_dtype(t, "M") for t in types):
        return np.dtype(max(types))
    if all(lib.is_np_dtype(t, "m") for t in types):
        return np.dtype(max(types))

    # don't mix bool / int or float or complex
    # this is different from numpy, which casts bool with float/int as int
    has_bools = any(t.kind == "b" for t in types)
    if has_bools:
        for t in types:
            if t.kind in "iufc":
                return np.dtype("object")

    return np_find_common_type(*types)


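# A minimal illustrative sketch: numeric dtypes promote as in numpy, while a
# bool mixed with a numeric dtype deliberately falls back to object (a place
# where pandas diverges from numpy's bool + int -> int behavior):
#
#   >>> find_common_type([np.dtype(np.int64), np.dtype(np.float32)])
#   dtype('float64')
#   >>> find_common_type([np.dtype(np.bool_), np.dtype(np.int64)])
#   dtype('O')
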
def construct_2d_arraylike_from_scalar(
    value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool
) -> np.ndarray:
    shape = (length, width)

    if dtype.kind in "mM":
        value = _maybe_box_and_unbox_datetimelike(value, dtype)
    elif dtype == _dtype_obj:
        if isinstance(value, (np.timedelta64, np.datetime64)):
            # calling np.array below would cast to pytimedelta/pydatetime
            out = np.empty(shape, dtype=object)
            out.fill(value)
            return out

    # Attempt to coerce to a numpy array
    try:
        if not copy:
            arr = np.asarray(value, dtype=dtype)
        else:
            arr = np.array(value, dtype=dtype, copy=copy)
    except (ValueError, TypeError) as err:
        raise TypeError(
            f"DataFrame constructor called with incompatible data and dtype: {err}"
        ) from err

    if arr.ndim != 0:
        raise ValueError("DataFrame constructor not properly called!")

    return np.full(shape, arr)


def construct_1d_arraylike_from_scalar(
    value: Scalar, length: int, dtype: DtypeObj | None
) -> ArrayLike:
    """
    create a np.ndarray / pandas type of specified shape and dtype
    filled with values

    Parameters
    ----------
    value : scalar value
    length : int
    dtype : pandas_dtype or np.dtype

    Returns
    -------
    np.ndarray / pandas type of length, filled with value
    """

    if dtype is None:
        try:
            dtype, value = infer_dtype_from_scalar(value)
        except OutOfBoundsDatetime:
            dtype = _dtype_obj

    if isinstance(dtype, ExtensionDtype):
        cls = dtype.construct_array_type()
        seq = [] if length == 0 else [value]
        subarr = cls._from_sequence(seq, dtype=dtype).repeat(length)

    else:
        if length and dtype.kind in "iu" and isna(value):
            # coerce if we have nan for an integer dtype
            dtype = np.dtype("float64")
        elif lib.is_np_dtype(dtype, "US"):
            # coerce to object dtype so that numpy takes our string
            # as a scalar value instead of truncating it
            dtype = np.dtype("object")
            if not isna(value):
                value = ensure_str(value)
        elif dtype.kind in "mM":
            value = _maybe_box_and_unbox_datetimelike(value, dtype)

        subarr = np.empty(length, dtype=dtype)
        if length:
            # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes
            subarr.fill(value)

    return subarr


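# A minimal illustrative sketch (assuming default dtype inference): the scalar
# is broadcast to the requested length, and NaN forces an integer dtype up to
# float64 so the fill is representable:
#
#   >>> construct_1d_arraylike_from_scalar(5, 3, np.dtype(np.int64))
#   array([5, 5, 5])
#   >>> construct_1d_arraylike_from_scalar(np.nan, 2, np.dtype(np.int64))
#   array([nan, nan])
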
def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj):
    # Caller is responsible for checking dtype.kind in "mM"

    if isinstance(value, dt.datetime):
        # we don't want to box dt64, in particular datetime64("NaT")
        value = maybe_box_datetimelike(value, dtype)

    return _maybe_unbox_datetimelike(value, dtype)


def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray:
    """
    Transform any list-like object into a 1-dimensional numpy array of object
    dtype.

    Parameters
    ----------
    values : any iterable which has a len()

    Raises
    ------
    TypeError
        * If `values` does not have a len()

    Returns
    -------
    1-dimensional numpy array of dtype object
    """
    # numpy will try to interpret nested lists as further dimensions, hence
    # making a 1D array that contains list-likes is a bit tricky:
    result = np.empty(len(values), dtype="object")
    result[:] = values
    return result


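# A minimal illustrative sketch of why the empty-then-assign dance is needed:
# np.array would interpret the nested tuples as a second dimension, while this
# helper keeps them as scalar elements of a 1-D object array:
#
#   >>> np.array([(1, 2), (3, 4)]).shape
#   (2, 2)
#   >>> construct_1d_object_array_from_listlike([(1, 2), (3, 4)]).shape
#   (2,)
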
def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray:
    """
    Takes any dtype and returns the casted version, raising for when data is
    incompatible with integer/unsigned integer dtypes.

    Parameters
    ----------
    arr : np.ndarray or list
        The array to cast.
    dtype : np.dtype
        The integer dtype to cast the array to.

    Returns
    -------
    ndarray
        Array of integer or unsigned integer dtype.

    Raises
    ------
    OverflowError : the dtype is incompatible with the data
    ValueError : loss of precision has occurred during casting

    Examples
    --------
    If you try to coerce negative values to unsigned integers, it raises:

    >>> pd.Series([-1], dtype="uint64")
    Traceback (most recent call last):
        ...
    OverflowError: Trying to coerce negative values to unsigned integers

    Also, if you try to coerce float values to integers, it raises:

    >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64"))
    Traceback (most recent call last):
        ...
    ValueError: Trying to coerce float values to integers
    """
    assert dtype.kind in "iu"

    try:
        if not isinstance(arr, np.ndarray):
            with warnings.catch_warnings():
                # We already disallow dtype=uint w/ negative numbers
                # (test_constructor_coercion_signed_to_unsigned) so safe to ignore.
                if not np_version_gt2:
                    warnings.filterwarnings(
                        "ignore",
                        "NumPy will stop allowing conversion of "
                        "out-of-bound Python int",
                        DeprecationWarning,
                    )
                casted = np.asarray(arr, dtype=dtype)
        else:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                casted = arr.astype(dtype, copy=False)
    except OverflowError as err:
        raise OverflowError(
            "The elements provided in the data cannot all be "
            f"casted to the dtype {dtype}"
        ) from err

    if isinstance(arr, np.ndarray) and arr.dtype == dtype:
        # avoid expensive array_equal check
        return casted

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        warnings.filterwarnings(
            "ignore", "elementwise comparison failed", FutureWarning
        )
        if np.array_equal(arr, casted):
            return casted

    # We do this casting to allow for proper
    # data and dtype checking.
    #
    # We didn't do this earlier because NumPy
    # doesn't handle `uint64` correctly.
    arr = np.asarray(arr)

    if np.issubdtype(arr.dtype, str):
        # TODO(numpy-2.0 min): This case will raise an OverflowError above
        if (casted.astype(str) == arr).all():
            return casted
        raise ValueError(f"string values cannot be losslessly cast to {dtype}")

    if dtype.kind == "u" and (arr < 0).any():
        # TODO: can this be hit anymore after numpy 2.0?
        raise OverflowError("Trying to coerce negative values to unsigned integers")

    if arr.dtype.kind == "f":
        if not np.isfinite(arr).all():
            raise IntCastingNaNError(
                "Cannot convert non-finite values (NA or inf) to integer"
            )
        raise ValueError("Trying to coerce float values to integers")
    if arr.dtype == object:
        raise ValueError("Trying to coerce float values to integers")

    if casted.dtype < arr.dtype:
        # TODO: Can this path be hit anymore with numpy > 2
        # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows
        raise ValueError(
            f"Values are too large to be losslessly converted to {dtype}. "
            f"To cast anyway, use pd.Series(values).astype({dtype})"
        )

    if arr.dtype.kind in "mM":
        # test_constructor_maskedarray_nonfloat
        raise TypeError(
            f"Constructing a Series or DataFrame from {arr.dtype} values and "
            f"dtype={dtype} is not supported. Use values.view({dtype}) instead."
        )

    # No known cases that get here, but raising explicitly to cover our bases.
    raise ValueError(f"values cannot be losslessly cast to {dtype}")


def can_hold_element(arr: ArrayLike, element: Any) -> bool:
    """
    Can we do an inplace setitem with this element in an array with this dtype?

    Parameters
    ----------
    arr : np.ndarray or ExtensionArray
    element : Any

    Returns
    -------
    bool
    """
    dtype = arr.dtype
    if not isinstance(dtype, np.dtype) or dtype.kind in "mM":
        if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)):
            # np.dtype here catches datetime64ns and timedelta64ns; we assume
            #  in this case that we have DatetimeArray/TimedeltaArray
            arr = cast(
                "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr
            )
            try:
                arr._validate_setitem_value(element)
                return True
            except (ValueError, TypeError):
                return False

        # This is technically incorrect, but maintains the behavior of
        # ExtensionBlock._can_hold_element
        return True

    try:
        np_can_hold_element(dtype, element)
        return True
    except (TypeError, LossySetitemError):
        return False


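# A minimal illustrative sketch: a value that fits the dtype's range can be
# set in place, one that overflows cannot:
#
#   >>> can_hold_element(np.array([1, 2], dtype=np.int8), 100)
#   True
#   >>> can_hold_element(np.array([1, 2], dtype=np.int8), 300)
#   False
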
def np_can_hold_element(dtype: np.dtype, element: Any) -> Any:
    """
    Raise if we cannot losslessly set this element into an ndarray with this dtype.

    Specifically about places where we disagree with numpy. i.e. there are
    cases where numpy will raise in doing the setitem that we do not check
    for here, e.g. setting str "X" into a numeric ndarray.

    Returns
    -------
    Any
        The element, potentially cast to the dtype.

    Raises
    ------
    ValueError : If we cannot losslessly store this element with this dtype.
    """
    if dtype == _dtype_obj:
        return element

    tipo = _maybe_infer_dtype_type(element)

    if dtype.kind in "iu":
        if isinstance(element, range):
            if _dtype_can_hold_range(element, dtype):
                return element
            raise LossySetitemError

        if is_integer(element) or (is_float(element) and element.is_integer()):
            # e.g. test_setitem_series_int8 if we have a python int 1
            #  tipo may be np.int32, despite the fact that it will fit
            #  in smaller int dtypes.
            info = np.iinfo(dtype)
            if info.min <= element <= info.max:
                return dtype.type(element)
            raise LossySetitemError

        if tipo is not None:
            if tipo.kind not in "iu":
                if isinstance(element, np.ndarray) and element.dtype.kind == "f":
                    # If all can be losslessly cast to integers, then we can hold them
                    with np.errstate(invalid="ignore"):
                        # We check afterwards if the cast was lossless, so no
                        # need to show the warning
                        casted = element.astype(dtype)
                    comp = casted == element
                    if comp.all():
                        # Return the casted values bc they can be passed to
                        #  np.putmask, whereas the raw values cannot.
                        #  see TestSetitemFloatNDarrayIntoIntegerSeries
                        return casted
                    raise LossySetitemError

                elif isinstance(element, ABCExtensionArray) and isinstance(
                    element.dtype, CategoricalDtype
                ):
                    # GH#52927 setting Categorical value into non-EA frame
                    # TODO: general-case for EAs?
                    try:
                        casted = element.astype(dtype)
                    except (ValueError, TypeError):
                        raise LossySetitemError
                    # Check for cases of either
                    #  a) lossy overflow/rounding or
                    #  b) semantic changes like dt64->int64
                    comp = casted == element
                    if not comp.all():
                        raise LossySetitemError
                    return casted

                # Anything other than integer we cannot hold
                raise LossySetitemError
            if (
                dtype.kind == "u"
                and isinstance(element, np.ndarray)
                and element.dtype.kind == "i"
            ):
                # see test_where_uint64
                casted = element.astype(dtype)
                if (casted == element).all():
                    # TODO: faster to check (element >=0).all()? potential
                    #  itemsize issues there?
                    return casted
                raise LossySetitemError
            if dtype.itemsize < tipo.itemsize:
                raise LossySetitemError
            if not isinstance(tipo, np.dtype):
                # i.e. nullable IntegerDtype; we can put this into an ndarray
                #  losslessly iff it has no NAs
                arr = element._values if isinstance(element, ABCSeries) else element
                if arr._hasna:
                    raise LossySetitemError
                return element

            return element

        raise LossySetitemError

    if dtype.kind == "f":
        if lib.is_integer(element) or lib.is_float(element):
            casted = dtype.type(element)
            if np.isnan(casted) or casted == element:
                return casted
            # otherwise e.g. overflow see TestCoercionFloat32
            raise LossySetitemError

        if tipo is not None:
            # TODO: itemsize check?
            if tipo.kind not in "iuf":
                # Anything other than float/integer we cannot hold
                raise LossySetitemError
            if not isinstance(tipo, np.dtype):
                # i.e. nullable IntegerDtype or FloatingDtype;
                #  we can put this into an ndarray losslessly iff it has no NAs
                if element._hasna:
                    raise LossySetitemError
                return element
            elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind:
                if isinstance(element, np.ndarray):
                    # e.g. TestDataFrameIndexingWhere::test_where_alignment
                    casted = element.astype(dtype)
                    if np.array_equal(casted, element, equal_nan=True):
                        return casted
                    raise LossySetitemError

            return element

        raise LossySetitemError

    if dtype.kind == "c":
        if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element):
            if np.isnan(element):
                # see test_where_complex GH#6345
                return dtype.type(element)

            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                casted = dtype.type(element)
            if casted == element:
                return casted
            # otherwise e.g. overflow see test_32878_complex_itemsize
            raise LossySetitemError

        if tipo is not None:
            if tipo.kind in "iufc":
                return element
            raise LossySetitemError
        raise LossySetitemError

    if dtype.kind == "b":
        if tipo is not None:
            if tipo.kind == "b":
                if not isinstance(tipo, np.dtype):
                    # i.e. we have a BooleanArray
                    if element._hasna:
                        # i.e. there are pd.NA elements
                        raise LossySetitemError
                return element
            raise LossySetitemError
        if lib.is_bool(element):
            return element
        raise LossySetitemError

    if dtype.kind == "S":
        # TODO: test tests.frame.methods.test_replace tests get here,
        #  need more targeted tests.  xref phofl has a PR about this
        if tipo is not None:
            if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize:
                return element
            raise LossySetitemError
        if isinstance(element, bytes) and len(element) <= dtype.itemsize:
            return element
        raise LossySetitemError

    if dtype.kind == "V":
        # i.e. np.void, which cannot hold _anything_
        raise LossySetitemError

    raise NotImplementedError(dtype)


def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:
    """
    _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints),
    but in many cases a range can be held by a smaller integer dtype.
    Check if this is one of those cases.
    """
    if not len(rng):
        return True
    return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype)


def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool:
    """
    np.can_cast pandas-equivalent for pre 2-0 behavior that allowed scalar
    inference

    Parameters
    ----------
    element : Scalar
    dtype : np.dtype

    Returns
    -------
    bool
    """
    try:
        np_can_hold_element(dtype, element)
        return True
    except (LossySetitemError, NotImplementedError):
        return False
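

# A minimal illustrative sketch: unlike a plain np.can_cast check, this
# considers the concrete scalar value against the dtype's range:
#
#   >>> np_can_cast_scalar(255, np.dtype(np.uint8))
#   True
#   >>> np_can_cast_scalar(256, np.dtype(np.uint8))
#   False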
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/common.py
ADDED
@@ -0,0 +1,1748 @@
"""
Common type operations.
"""
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
)
import warnings

import numpy as np

from pandas._libs import (
    Interval,
    Period,
    algos,
    lib,
)
from pandas._libs.tslibs import conversion
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.base import _registry as registry
from pandas.core.dtypes.dtypes import (
    CategoricalDtype,
    DatetimeTZDtype,
    ExtensionDtype,
    IntervalDtype,
    PeriodDtype,
    SparseDtype,
)
from pandas.core.dtypes.generic import ABCIndex
from pandas.core.dtypes.inference import (
    is_array_like,
    is_bool,
    is_complex,
    is_dataclass,
    is_decimal,
    is_dict_like,
    is_file_like,
    is_float,
    is_hashable,
    is_integer,
    is_interval,
    is_iterator,
    is_list_like,
    is_named_tuple,
    is_nested_list_like,
    is_number,
    is_re,
    is_re_compilable,
    is_scalar,
    is_sequence,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ArrayLike,
        DtypeObj,
    )

DT64NS_DTYPE = conversion.DT64NS_DTYPE
TD64NS_DTYPE = conversion.TD64NS_DTYPE
INT64_DTYPE = np.dtype(np.int64)

# oh the troubles to reduce import time
_is_scipy_sparse = None

ensure_float64 = algos.ensure_float64
ensure_int64 = algos.ensure_int64
ensure_int32 = algos.ensure_int32
ensure_int16 = algos.ensure_int16
ensure_int8 = algos.ensure_int8
ensure_platform_int = algos.ensure_platform_int
ensure_object = algos.ensure_object
ensure_uint64 = algos.ensure_uint64


|
81 |
+
"""
|
82 |
+
Ensure that bytes and non-strings get converted into ``str`` objects.
|
83 |
+
"""
|
84 |
+
if isinstance(value, bytes):
|
85 |
+
value = value.decode("utf-8")
|
86 |
+
elif not isinstance(value, str):
|
87 |
+
value = str(value)
|
88 |
+
return value
|
89 |
+
|
90 |
+
|
def ensure_python_int(value: int | np.integer) -> int:
    """
    Ensure that a value is a python int.

    Parameters
    ----------
    value : int or numpy.integer

    Returns
    -------
    int

    Raises
    ------
    TypeError: if the value isn't an int or can't be converted to one.
    """
    if not (is_integer(value) or is_float(value)):
        if not is_scalar(value):
            raise TypeError(
                f"Value needs to be a scalar value, was type {type(value).__name__}"
            )
        raise TypeError(f"Wrong type {type(value)} for value {value}")
    try:
        new_value = int(value)
        assert new_value == value
    except (TypeError, ValueError, AssertionError) as err:
        raise TypeError(f"Wrong type {type(value)} for value {value}") from err
    return new_value

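# Editor's note: an illustrative doctest-style sketch (not part of the
# upstream file) of ensure_python_int's contract -- exact integral values
# pass, lossy conversions raise. Assumes only numpy, imported above as np.
#
#     >>> ensure_python_int(np.int32(7))
#     7
#     >>> ensure_python_int(np.float64(2.0))
#     2
#     >>> ensure_python_int(2.5)
#     Traceback (most recent call last):
#     ...
#     TypeError: Wrong type <class 'float'> for value 2.5
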
def classes(*klasses) -> Callable:
    """Evaluate if the tipo is a subclass of the klasses."""
    return lambda tipo: issubclass(tipo, klasses)


def _classes_and_not_datetimelike(*klasses) -> Callable:
    """
    Evaluate if the tipo is a subclass of the klasses
    and not a datetimelike.
    """
    return lambda tipo: (
        issubclass(tipo, klasses)
        and not issubclass(tipo, (np.datetime64, np.timedelta64))
    )

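# Editor's note: a brief illustration (not part of the upstream file) of the
# two predicate factories above. Each returns a callable that is later fed a
# numpy scalar type by _is_dtype_type:
#
#     >>> classes(np.number)(np.dtype("float64").type)
#     True
#     >>> _classes_and_not_datetimelike(np.generic)(np.dtype("M8[ns]").type)
#     False
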
def is_object_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the object dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the object dtype.

    Examples
    --------
    >>> from pandas.api.types import is_object_dtype
    >>> is_object_dtype(object)
    True
    >>> is_object_dtype(int)
    False
    >>> is_object_dtype(np.array([], dtype=object))
    True
    >>> is_object_dtype(np.array([], dtype=int))
    False
    >>> is_object_dtype([1, 2, 3])
    False
    """
    return _is_dtype_type(arr_or_dtype, classes(np.object_))

def is_sparse(arr) -> bool:
    """
    Check whether an array-like is a 1-D pandas sparse array.

    .. deprecated:: 2.1.0
        Use isinstance(dtype, pd.SparseDtype) instead.

    Check that the one-dimensional array-like is a pandas sparse array.
    Returns True if it is a pandas sparse array, not another type of
    sparse array.

    Parameters
    ----------
    arr : array-like
        Array-like to check.

    Returns
    -------
    bool
        Whether or not the array-like is a pandas sparse array.

    Examples
    --------
    Returns `True` if the parameter is a 1-D pandas sparse array.

    >>> from pandas.api.types import is_sparse
    >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0]))
    True
    >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0])))
    True

    Returns `False` if the parameter is not sparse.

    >>> is_sparse(np.array([0, 0, 1, 0]))
    False
    >>> is_sparse(pd.Series([0, 1, 0, 0]))
    False

    Returns `False` if the parameter is not a pandas sparse array.

    >>> from scipy.sparse import bsr_matrix
    >>> is_sparse(bsr_matrix([0, 1, 0, 0]))
    False

    Returns `False` if the parameter has more than one dimension.
    """
    warnings.warn(
        "is_sparse is deprecated and will be removed in a future "
        "version. Check `isinstance(dtype, pd.SparseDtype)` instead.",
        DeprecationWarning,
        stacklevel=2,
    )

    dtype = getattr(arr, "dtype", arr)
    return isinstance(dtype, SparseDtype)

def is_scipy_sparse(arr) -> bool:
    """
    Check whether an array-like is a scipy.sparse.spmatrix instance.

    Parameters
    ----------
    arr : array-like
        The array-like to check.

    Returns
    -------
    boolean
        Whether or not the array-like is a scipy.sparse.spmatrix instance.

    Notes
    -----
    If scipy is not installed, this function will always return False.

    Examples
    --------
    >>> from scipy.sparse import bsr_matrix
    >>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
    True
    >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3]))
    False
    """
    global _is_scipy_sparse

    if _is_scipy_sparse is None:  # pylint: disable=used-before-assignment
        try:
            from scipy.sparse import issparse as _is_scipy_sparse
        except ImportError:
            _is_scipy_sparse = lambda _: False

    assert _is_scipy_sparse is not None
    return _is_scipy_sparse(arr)

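# Editor's note (not part of the upstream file): is_scipy_sparse illustrates
# a lazy-import cache -- the module-level _is_scipy_sparse starts as None and
# is replaced on first call, either with scipy.sparse.issparse or with a stub
# returning False, so scipy is never imported at pandas import time. A
# minimal sketch of the same pattern, using hypothetical names:
#
#     _checker = None
#     def uses_optional_dep(obj):
#         global _checker
#         if _checker is None:
#             try:
#                 from optional_dep import check as _checker  # hypothetical
#             except ImportError:
#                 _checker = lambda _: False
#         return _checker(obj)
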
def is_datetime64_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the datetime64 dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the datetime64 dtype.

    Examples
    --------
    >>> from pandas.api.types import is_datetime64_dtype
    >>> is_datetime64_dtype(object)
    False
    >>> is_datetime64_dtype(np.datetime64)
    True
    >>> is_datetime64_dtype(np.array([], dtype=int))
    False
    >>> is_datetime64_dtype(np.array([], dtype=np.datetime64))
    True
    >>> is_datetime64_dtype([1, 2, 3])
    False
    """
    if isinstance(arr_or_dtype, np.dtype):
        # GH#33400 fastpath for dtype object
        return arr_or_dtype.kind == "M"
    return _is_dtype_type(arr_or_dtype, classes(np.datetime64))

def is_datetime64tz_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of a DatetimeTZDtype dtype.

    .. deprecated:: 2.1.0
        Use isinstance(dtype, pd.DatetimeTZDtype) instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.

    Examples
    --------
    >>> from pandas.api.types import is_datetime64tz_dtype
    >>> is_datetime64tz_dtype(object)
    False
    >>> is_datetime64tz_dtype([1, 2, 3])
    False
    >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3]))  # tz-naive
    False
    >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
    True

    >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
    >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
    >>> s = pd.Series([], dtype=dtype)
    >>> is_datetime64tz_dtype(dtype)
    True
    >>> is_datetime64tz_dtype(s)
    True
    """
    # GH#52607
    warnings.warn(
        "is_datetime64tz_dtype is deprecated and will be removed in a future "
        "version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    if isinstance(arr_or_dtype, DatetimeTZDtype):
        # GH#33400 fastpath for dtype object
        # GH 34986
        return True

    if arr_or_dtype is None:
        return False
    return DatetimeTZDtype.is_dtype(arr_or_dtype)

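# Editor's note (not part of the upstream file): per the DeprecationWarning
# above, the supported spelling going forward is an isinstance check.
# Assumes pandas is imported as pd:
#
#     >>> dtype = pd.DatetimeTZDtype("ns", tz="US/Eastern")
#     >>> isinstance(dtype, pd.DatetimeTZDtype)
#     True
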
def is_timedelta64_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the timedelta64 dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the timedelta64 dtype.

    Examples
    --------
    >>> from pandas.core.dtypes.common import is_timedelta64_dtype
    >>> is_timedelta64_dtype(object)
    False
    >>> is_timedelta64_dtype(np.timedelta64)
    True
    >>> is_timedelta64_dtype([1, 2, 3])
    False
    >>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
    True
    >>> is_timedelta64_dtype('0 days')
    False
    """
    if isinstance(arr_or_dtype, np.dtype):
        # GH#33400 fastpath for dtype object
        return arr_or_dtype.kind == "m"

    return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))

def is_period_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the Period dtype.

    .. deprecated:: 2.2.0
        Use isinstance(dtype, pd.PeriodDtype) instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the Period dtype.

    Examples
    --------
    >>> from pandas.core.dtypes.common import is_period_dtype
    >>> is_period_dtype(object)
    False
    >>> is_period_dtype(pd.PeriodDtype(freq="D"))
    True
    >>> is_period_dtype([1, 2, 3])
    False
    >>> is_period_dtype(pd.Period("2017-01-01"))
    False
    >>> is_period_dtype(pd.PeriodIndex([], freq="Y"))
    True
    """
    warnings.warn(
        "is_period_dtype is deprecated and will be removed in a future version. "
        "Use `isinstance(dtype, pd.PeriodDtype)` instead",
        DeprecationWarning,
        stacklevel=2,
    )
    if isinstance(arr_or_dtype, ExtensionDtype):
        # GH#33400 fastpath for dtype object
        return arr_or_dtype.type is Period

    if arr_or_dtype is None:
        return False
    return PeriodDtype.is_dtype(arr_or_dtype)

def is_interval_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the Interval dtype.

    .. deprecated:: 2.2.0
        Use isinstance(dtype, pd.IntervalDtype) instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the Interval dtype.

    Examples
    --------
    >>> from pandas.core.dtypes.common import is_interval_dtype
    >>> is_interval_dtype(object)
    False
    >>> is_interval_dtype(pd.IntervalDtype())
    True
    >>> is_interval_dtype([1, 2, 3])
    False
    >>>
    >>> interval = pd.Interval(1, 2, closed="right")
    >>> is_interval_dtype(interval)
    False
    >>> is_interval_dtype(pd.IntervalIndex([interval]))
    True
    """
    # GH#52607
    warnings.warn(
        "is_interval_dtype is deprecated and will be removed in a future version. "
        "Use `isinstance(dtype, pd.IntervalDtype)` instead",
        DeprecationWarning,
        stacklevel=2,
    )
    if isinstance(arr_or_dtype, ExtensionDtype):
        # GH#33400 fastpath for dtype object
        return arr_or_dtype.type is Interval

    if arr_or_dtype is None:
        return False
    return IntervalDtype.is_dtype(arr_or_dtype)

def is_categorical_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the Categorical dtype.

    .. deprecated:: 2.2.0
        Use isinstance(dtype, pd.CategoricalDtype) instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the Categorical dtype.

    Examples
    --------
    >>> from pandas.api.types import is_categorical_dtype
    >>> from pandas import CategoricalDtype
    >>> is_categorical_dtype(object)
    False
    >>> is_categorical_dtype(CategoricalDtype())
    True
    >>> is_categorical_dtype([1, 2, 3])
    False
    >>> is_categorical_dtype(pd.Categorical([1, 2, 3]))
    True
    >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
    True
    """
    # GH#52527
    warnings.warn(
        "is_categorical_dtype is deprecated and will be removed in a future "
        "version. Use isinstance(dtype, pd.CategoricalDtype) instead",
        DeprecationWarning,
        stacklevel=2,
    )
    if isinstance(arr_or_dtype, ExtensionDtype):
        # GH#33400 fastpath for dtype object
        return arr_or_dtype.name == "category"

    if arr_or_dtype is None:
        return False
    return CategoricalDtype.is_dtype(arr_or_dtype)

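# Editor's note (not part of the upstream file): is_period_dtype,
# is_interval_dtype and is_categorical_dtype above are all deprecated in
# favor of isinstance checks on the dtype object, as their warnings state.
# Assumes pandas is imported as pd:
#
#     >>> isinstance(pd.PeriodDtype("D"), pd.PeriodDtype)
#     True
#     >>> isinstance(pd.CategoricalDtype(), pd.CategoricalDtype)
#     True
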
def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:
    """
    Faster alternative to is_string_dtype, assumes we have a np.dtype object.
    """
    return dtype == object or dtype.kind in "SU"

def is_string_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the string dtype.

    If an array is passed with an object dtype, the elements must be
    inferred as strings.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of the string dtype.

    Examples
    --------
    >>> from pandas.api.types import is_string_dtype
    >>> is_string_dtype(str)
    True
    >>> is_string_dtype(object)
    True
    >>> is_string_dtype(int)
    False
    >>> is_string_dtype(np.array(['a', 'b']))
    True
    >>> is_string_dtype(pd.Series([1, 2]))
    False
    >>> is_string_dtype(pd.Series([1, 2], dtype=object))
    False
    """
    if hasattr(arr_or_dtype, "dtype") and _get_dtype(arr_or_dtype).kind == "O":
        return is_all_strings(arr_or_dtype)

    def condition(dtype) -> bool:
        if is_string_or_object_np_dtype(dtype):
            return True
        try:
            return dtype == "string"
        except TypeError:
            return False

    return _is_dtype(arr_or_dtype, condition)

def is_dtype_equal(source, target) -> bool:
    """
    Check if two dtypes are equal.

    Parameters
    ----------
    source : The first dtype to compare
    target : The second dtype to compare

    Returns
    -------
    boolean
        Whether or not the two dtypes are equal.

    Examples
    --------
    >>> is_dtype_equal(int, float)
    False
    >>> is_dtype_equal("int", int)
    True
    >>> is_dtype_equal(object, "category")
    False
    >>> is_dtype_equal(CategoricalDtype(), "category")
    True
    >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64")
    False
    """
    if isinstance(target, str):
        if not isinstance(source, str):
            # GH#38516 ensure we get the same behavior from
            #  is_dtype_equal(CDT, "category") and CDT == "category"
            try:
                src = _get_dtype(source)
                if isinstance(src, ExtensionDtype):
                    return src == target
            except (TypeError, AttributeError, ImportError):
                return False
    elif isinstance(source, str):
        return is_dtype_equal(target, source)

    try:
        source = _get_dtype(source)
        target = _get_dtype(target)
        return source == target
    except (TypeError, AttributeError, ImportError):
        # invalid comparison
        # object == category will hit this
        return False

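# Editor's note (not part of the upstream file): when only `source` is a
# string, is_dtype_equal swaps the arguments and recurses, so string/dtype
# comparisons are symmetric:
#
#     >>> is_dtype_equal("category", CategoricalDtype())
#     True
#     >>> is_dtype_equal(CategoricalDtype(), "category")
#     True
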
def is_integer_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of an integer dtype.

    Unlike in `is_any_int_dtype`, timedelta64 instances will return False.

    The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
    as integer by this function.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of an integer dtype and
        not an instance of timedelta64.

    Examples
    --------
    >>> from pandas.api.types import is_integer_dtype
    >>> is_integer_dtype(str)
    False
    >>> is_integer_dtype(int)
    True
    >>> is_integer_dtype(float)
    False
    >>> is_integer_dtype(np.uint64)
    True
    >>> is_integer_dtype('int8')
    True
    >>> is_integer_dtype('Int8')
    True
    >>> is_integer_dtype(pd.Int8Dtype)
    True
    >>> is_integer_dtype(np.datetime64)
    False
    >>> is_integer_dtype(np.timedelta64)
    False
    >>> is_integer_dtype(np.array(['a', 'b']))
    False
    >>> is_integer_dtype(pd.Series([1, 2]))
    True
    >>> is_integer_dtype(np.array([], dtype=np.timedelta64))
    False
    >>> is_integer_dtype(pd.Index([1, 2.]))  # float
    False
    """
    return _is_dtype_type(
        arr_or_dtype, _classes_and_not_datetimelike(np.integer)
    ) or _is_dtype(
        arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu"
    )

def is_signed_integer_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a signed integer dtype.

    Unlike in `is_any_int_dtype`, timedelta64 instances will return False.

    The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
    as integer by this function.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a signed integer dtype
        and not an instance of timedelta64.

    Examples
    --------
    >>> from pandas.core.dtypes.common import is_signed_integer_dtype
    >>> is_signed_integer_dtype(str)
    False
    >>> is_signed_integer_dtype(int)
    True
    >>> is_signed_integer_dtype(float)
    False
    >>> is_signed_integer_dtype(np.uint64)  # unsigned
    False
    >>> is_signed_integer_dtype('int8')
    True
    >>> is_signed_integer_dtype('Int8')
    True
    >>> is_signed_integer_dtype(pd.Int8Dtype)
    True
    >>> is_signed_integer_dtype(np.datetime64)
    False
    >>> is_signed_integer_dtype(np.timedelta64)
    False
    >>> is_signed_integer_dtype(np.array(['a', 'b']))
    False
    >>> is_signed_integer_dtype(pd.Series([1, 2]))
    True
    >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
    False
    >>> is_signed_integer_dtype(pd.Index([1, 2.]))  # float
    False
    >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32))  # unsigned
    False
    """
    return _is_dtype_type(
        arr_or_dtype, _classes_and_not_datetimelike(np.signedinteger)
    ) or _is_dtype(
        arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "i"
    )

def is_unsigned_integer_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of an unsigned integer dtype.

    The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also
    considered as integer by this function.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of an unsigned integer dtype.

    Examples
    --------
    >>> from pandas.api.types import is_unsigned_integer_dtype
    >>> is_unsigned_integer_dtype(str)
    False
    >>> is_unsigned_integer_dtype(int)  # signed
    False
    >>> is_unsigned_integer_dtype(float)
    False
    >>> is_unsigned_integer_dtype(np.uint64)
    True
    >>> is_unsigned_integer_dtype('uint8')
    True
    >>> is_unsigned_integer_dtype('UInt8')
    True
    >>> is_unsigned_integer_dtype(pd.UInt8Dtype)
    True
    >>> is_unsigned_integer_dtype(np.array(['a', 'b']))
    False
    >>> is_unsigned_integer_dtype(pd.Series([1, 2]))  # signed
    False
    >>> is_unsigned_integer_dtype(pd.Index([1, 2.]))  # float
    False
    >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
    True
    """
    return _is_dtype_type(
        arr_or_dtype, _classes_and_not_datetimelike(np.unsignedinteger)
    ) or _is_dtype(
        arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u"
    )

def is_int64_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the int64 dtype.

    .. deprecated:: 2.1.0

        is_int64_dtype is deprecated and will be removed in a future
        version. Use dtype == np.int64 instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of the int64 dtype.

    Notes
    -----
    Depending on system architecture, the return value of `is_int64_dtype(
    int)` will be True if the OS uses 64-bit integers and False if the OS
    uses 32-bit integers.

    Examples
    --------
    >>> from pandas.api.types import is_int64_dtype
    >>> is_int64_dtype(str)  # doctest: +SKIP
    False
    >>> is_int64_dtype(np.int32)  # doctest: +SKIP
    False
    >>> is_int64_dtype(np.int64)  # doctest: +SKIP
    True
    >>> is_int64_dtype('int8')  # doctest: +SKIP
    False
    >>> is_int64_dtype('Int8')  # doctest: +SKIP
    False
    >>> is_int64_dtype(pd.Int64Dtype)  # doctest: +SKIP
    True
    >>> is_int64_dtype(float)  # doctest: +SKIP
    False
    >>> is_int64_dtype(np.uint64)  # unsigned  # doctest: +SKIP
    False
    >>> is_int64_dtype(np.array(['a', 'b']))  # doctest: +SKIP
    False
    >>> is_int64_dtype(np.array([1, 2], dtype=np.int64))  # doctest: +SKIP
    True
    >>> is_int64_dtype(pd.Index([1, 2.]))  # float  # doctest: +SKIP
    False
    >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32))  # unsigned  # doctest: +SKIP
    False
    """
    # GH#52564
    warnings.warn(
        "is_int64_dtype is deprecated and will be removed in a future "
        "version. Use dtype == np.int64 instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _is_dtype_type(arr_or_dtype, classes(np.int64))

def is_datetime64_any_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the datetime64 dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    bool
        Whether or not the array or dtype is of the datetime64 dtype.

    Examples
    --------
    >>> from pandas.api.types import is_datetime64_any_dtype
    >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
    >>> is_datetime64_any_dtype(str)
    False
    >>> is_datetime64_any_dtype(int)
    False
    >>> is_datetime64_any_dtype(np.datetime64)  # can be tz-naive
    True
    >>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
    True
    >>> is_datetime64_any_dtype(np.array(['a', 'b']))
    False
    >>> is_datetime64_any_dtype(np.array([1, 2]))
    False
    >>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]"))
    True
    >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
    True
    """
    if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)):
        # GH#33400 fastpath for dtype object
        return arr_or_dtype.kind == "M"

    if arr_or_dtype is None:
        return False

    try:
        tipo = _get_dtype(arr_or_dtype)
    except TypeError:
        return False
    return lib.is_np_dtype(tipo, "M") or isinstance(tipo, DatetimeTZDtype)

def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the datetime64[ns] dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    bool
        Whether or not the array or dtype is of the datetime64[ns] dtype.

    Examples
    --------
    >>> from pandas.api.types import is_datetime64_ns_dtype
    >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
    >>> is_datetime64_ns_dtype(str)
    False
    >>> is_datetime64_ns_dtype(int)
    False
    >>> is_datetime64_ns_dtype(np.datetime64)  # no unit
    False
    >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
    True
    >>> is_datetime64_ns_dtype(np.array(['a', 'b']))
    False
    >>> is_datetime64_ns_dtype(np.array([1, 2]))
    False
    >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64"))  # no unit
    False
    >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]"))  # wrong unit
    False
    >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
    True
    """
    if arr_or_dtype is None:
        return False
    try:
        tipo = _get_dtype(arr_or_dtype)
    except TypeError:
        return False
    return tipo == DT64NS_DTYPE or (
        isinstance(tipo, DatetimeTZDtype) and tipo.unit == "ns"
    )

def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the timedelta64[ns] dtype.

    This is a very specific dtype, so generic ones like `np.timedelta64`
    will return False if passed into this function.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of the timedelta64[ns] dtype.

    Examples
    --------
    >>> from pandas.core.dtypes.common import is_timedelta64_ns_dtype
    >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
    True
    >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]'))  # Wrong frequency
    False
    >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
    True
    >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
    False
    """
    return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE)

# This exists to silence numpy deprecation warnings, see GH#29553
def is_numeric_v_string_like(a: ArrayLike, b) -> bool:
    """
    Check if we are comparing a string-like object to a numeric ndarray.
    NumPy doesn't like to compare such objects, especially numeric arrays
    and scalar string-likes.

    Parameters
    ----------
    a : array-like, scalar
        The first object to check.
    b : array-like, scalar
        The second object to check.

    Returns
    -------
    boolean
        Whether we are comparing a string-like object to a numeric array.

    Examples
    --------
    >>> is_numeric_v_string_like(np.array([1]), "foo")
    True
    >>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
    True
    >>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
    True
    >>> is_numeric_v_string_like(np.array([1]), np.array([2]))
    False
    >>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
    False
    """
    is_a_array = isinstance(a, np.ndarray)
    is_b_array = isinstance(b, np.ndarray)

    is_a_numeric_array = is_a_array and a.dtype.kind in ("u", "i", "f", "c", "b")
    is_b_numeric_array = is_b_array and b.dtype.kind in ("u", "i", "f", "c", "b")
    is_a_string_array = is_a_array and a.dtype.kind in ("S", "U")
    is_b_string_array = is_b_array and b.dtype.kind in ("S", "U")

    is_b_scalar_string_like = not is_b_array and isinstance(b, str)

    return (
        (is_a_numeric_array and is_b_scalar_string_like)
        or (is_a_numeric_array and is_b_string_array)
        or (is_b_numeric_array and is_a_string_array)
    )

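# Editor's note (not part of the upstream file): as the GH#29553 comment
# says, this guard lets callers detect numeric-vs-string comparisons and
# short-circuit them instead of handing them to NumPy, whose behavior for
# such mixed comparisons has varied across versions. A hedged usage sketch:
#
#     >>> if is_numeric_v_string_like(np.array([1, 2]), "foo"):
#     ...     result = False  # e.g. treat the comparison as simply unequal
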
def needs_i8_conversion(dtype: DtypeObj | None) -> bool:
    """
    Check whether the dtype should be converted to int64.

    Dtype "needs" such a conversion if the dtype is of a datetime-like dtype.

    Parameters
    ----------
    dtype : np.dtype, ExtensionDtype, or None

    Returns
    -------
    boolean
        Whether or not the dtype should be converted to int64.

    Examples
    --------
    >>> needs_i8_conversion(str)
    False
    >>> needs_i8_conversion(np.int64)
    False
    >>> needs_i8_conversion(np.datetime64)
    False
    >>> needs_i8_conversion(np.dtype(np.datetime64))
    True
    >>> needs_i8_conversion(np.array(['a', 'b']))
    False
    >>> needs_i8_conversion(pd.Series([1, 2]))
    False
    >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
    False
    >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
    False
    >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype)
    True
    """
    if isinstance(dtype, np.dtype):
        return dtype.kind in "mM"
    return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))

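# Editor's note (not part of the upstream file): "i8 conversion" refers to
# viewing datetime64/timedelta64 data as int64 (nanosecond counts) so that
# integer-only algorithms can operate on it, e.g.:
#
#     >>> np.array(["2020-01-01"], dtype="M8[ns]").view("i8")
#     array([1577836800000000000])
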
def is_numeric_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a numeric dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a numeric dtype.

    Examples
    --------
    >>> from pandas.api.types import is_numeric_dtype
    >>> is_numeric_dtype(str)
    False
    >>> is_numeric_dtype(int)
    True
    >>> is_numeric_dtype(float)
    True
    >>> is_numeric_dtype(np.uint64)
    True
    >>> is_numeric_dtype(np.datetime64)
    False
    >>> is_numeric_dtype(np.timedelta64)
    False
    >>> is_numeric_dtype(np.array(['a', 'b']))
    False
    >>> is_numeric_dtype(pd.Series([1, 2]))
    True
    >>> is_numeric_dtype(pd.Index([1, 2.]))
    True
    >>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
    False
    """
    return _is_dtype_type(
        arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_)
    ) or _is_dtype(
        arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric
    )

def is_any_real_numeric_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a real number dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a real number dtype.

    Examples
    --------
    >>> from pandas.api.types import is_any_real_numeric_dtype
    >>> is_any_real_numeric_dtype(int)
    True
    >>> is_any_real_numeric_dtype(float)
    True
    >>> is_any_real_numeric_dtype(object)
    False
    >>> is_any_real_numeric_dtype(str)
    False
    >>> is_any_real_numeric_dtype(complex(1, 2))
    False
    >>> is_any_real_numeric_dtype(bool)
    False
    """
    return (
        is_numeric_dtype(arr_or_dtype)
        and not is_complex_dtype(arr_or_dtype)
        and not is_bool_dtype(arr_or_dtype)
    )

def is_float_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a float dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a float dtype.

    Examples
    --------
    >>> from pandas.api.types import is_float_dtype
    >>> is_float_dtype(str)
    False
    >>> is_float_dtype(int)
    False
    >>> is_float_dtype(float)
    True
    >>> is_float_dtype(np.array(['a', 'b']))
    False
    >>> is_float_dtype(pd.Series([1, 2]))
    False
    >>> is_float_dtype(pd.Index([1, 2.]))
    True
    """
    return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(
        arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f"
    )

def is_bool_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a boolean dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a boolean dtype.

    Notes
    -----
    An ExtensionArray is considered boolean when the ``_is_boolean``
    attribute is set to True.

    Examples
    --------
    >>> from pandas.api.types import is_bool_dtype
    >>> is_bool_dtype(str)
    False
    >>> is_bool_dtype(int)
    False
    >>> is_bool_dtype(bool)
    True
    >>> is_bool_dtype(np.bool_)
    True
    >>> is_bool_dtype(np.array(['a', 'b']))
    False
    >>> is_bool_dtype(pd.Series([1, 2]))
    False
    >>> is_bool_dtype(np.array([True, False]))
    True
    >>> is_bool_dtype(pd.Categorical([True, False]))
    True
    >>> is_bool_dtype(pd.arrays.SparseArray([True, False]))
    True
    """
    if arr_or_dtype is None:
        return False
    try:
        dtype = _get_dtype(arr_or_dtype)
    except (TypeError, ValueError):
        return False

    if isinstance(dtype, CategoricalDtype):
        arr_or_dtype = dtype.categories
        # now we use the special definition for Index

    if isinstance(arr_or_dtype, ABCIndex):
        # Allow Index[object] that is all-bools or Index["boolean"]
        if arr_or_dtype.inferred_type == "boolean":
            if not is_bool_dtype(arr_or_dtype.dtype):
                # GH#52680
                warnings.warn(
                    "The behavior of is_bool_dtype with an object-dtype Index "
                    "of bool objects is deprecated. In a future version, "
                    "this will return False. Cast the Index to a bool dtype instead.",
                    DeprecationWarning,
                    stacklevel=2,
                )
            return True
        return False
    elif isinstance(dtype, ExtensionDtype):
        return getattr(dtype, "_is_boolean", False)

    return issubclass(dtype.type, np.bool_)

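# Editor's note (not part of the upstream file): per the GH#52680 warning
# above, an object-dtype Index of Python bools still returns True but is
# deprecated; the forward-compatible fix is an explicit cast. A hedged
# sketch, assuming pandas is imported as pd:
#
#     >>> idx = pd.Index([True, False], dtype=object)
#     >>> is_bool_dtype(idx.astype(bool))  # no warning, stays True
#     True
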
def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool:
    """
    Analogue to is_extension_array_dtype but excluding DatetimeTZDtype.
    """
    return isinstance(dtype, ExtensionDtype) and not dtype._supports_2d

def is_extension_array_dtype(arr_or_dtype) -> bool:
    """
    Check if an object is a pandas extension array type.

    See the :ref:`User Guide <extending.extension-types>` for more.

    Parameters
    ----------
    arr_or_dtype : object
        For array-like input, the ``.dtype`` attribute will
        be extracted.

    Returns
    -------
    bool
        Whether the `arr_or_dtype` is an extension array type.

    Notes
    -----
    This checks whether an object implements the pandas extension
    array interface. In pandas, this includes:

    * Categorical
    * Sparse
    * Interval
    * Period
    * DatetimeArray
    * TimedeltaArray

    Third-party libraries may implement arrays or types satisfying
    this interface as well.

    Examples
    --------
    >>> from pandas.api.types import is_extension_array_dtype
    >>> arr = pd.Categorical(['a', 'b'])
    >>> is_extension_array_dtype(arr)
    True
    >>> is_extension_array_dtype(arr.dtype)
    True

    >>> arr = np.array(['a', 'b'])
    >>> is_extension_array_dtype(arr.dtype)
    False
    """
    dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
    if isinstance(dtype, ExtensionDtype):
        return True
    elif isinstance(dtype, np.dtype):
        return False
    else:
        return registry.find(dtype) is not None

def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool:
    """
    Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype.

    Notes
    -----
    Checks only for dtype objects, not dtype-castable strings or types.
    """
    return isinstance(dtype, ExtensionDtype) or (lib.is_np_dtype(dtype, "mM"))

def is_complex_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a complex dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a complex dtype.

    Examples
    --------
    >>> from pandas.api.types import is_complex_dtype
    >>> is_complex_dtype(str)
    False
    >>> is_complex_dtype(int)
    False
    >>> is_complex_dtype(np.complex128)
    True
    >>> is_complex_dtype(np.array(['a', 'b']))
    False
    >>> is_complex_dtype(pd.Series([1, 2]))
    False
    >>> is_complex_dtype(np.array([1 + 1j, 5]))
    True
    """
    return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))

def _is_dtype(arr_or_dtype, condition) -> bool:
    """
    Return true if the condition is satisfied for the arr_or_dtype.

    Parameters
    ----------
    arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType
        The array-like or dtype object whose dtype we want to extract.
    condition : callable[Union[np.dtype, ExtensionDtype]]

    Returns
    -------
    bool
    """
    if arr_or_dtype is None:
        return False
    try:
        dtype = _get_dtype(arr_or_dtype)
    except (TypeError, ValueError):
        return False
    return condition(dtype)

def _get_dtype(arr_or_dtype) -> DtypeObj:
    """
    Get the dtype instance associated with an array
    or dtype object.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype object whose dtype we want to extract.

    Returns
    -------
    obj_dtype : The extracted dtype instance from the
        passed-in array or dtype object.

    Raises
    ------
    TypeError : The passed in object is None.
    """
    if arr_or_dtype is None:
        raise TypeError("Cannot deduce dtype from null object")

    # fastpath
    if isinstance(arr_or_dtype, np.dtype):
        return arr_or_dtype
    elif isinstance(arr_or_dtype, type):
        return np.dtype(arr_or_dtype)

    # if we have an array-like
    elif hasattr(arr_or_dtype, "dtype"):
        arr_or_dtype = arr_or_dtype.dtype

    return pandas_dtype(arr_or_dtype)

def _is_dtype_type(arr_or_dtype, condition) -> bool:
    """
    Return true if the condition is satisfied for the arr_or_dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype object whose dtype we want to extract.
    condition : callable[Union[np.dtype, ExtensionDtypeType]]

    Returns
    -------
    bool : if the condition is satisfied for the arr_or_dtype
    """
    if arr_or_dtype is None:
        return condition(type(None))

    # fastpath
    if isinstance(arr_or_dtype, np.dtype):
        return condition(arr_or_dtype.type)
    elif isinstance(arr_or_dtype, type):
        if issubclass(arr_or_dtype, ExtensionDtype):
            arr_or_dtype = arr_or_dtype.type
        return condition(np.dtype(arr_or_dtype).type)

    # if we have an array-like
    if hasattr(arr_or_dtype, "dtype"):
        arr_or_dtype = arr_or_dtype.dtype

    # we are not possibly a dtype
    elif is_list_like(arr_or_dtype):
        return condition(type(None))

    try:
        tipo = pandas_dtype(arr_or_dtype).type
    except (TypeError, ValueError):
        if is_scalar(arr_or_dtype):
            return condition(type(None))

        return False

    return condition(tipo)

|
1478 |
+
def infer_dtype_from_object(dtype) -> type:
|
1479 |
+
"""
|
1480 |
+
Get a numpy dtype.type-style object for a dtype object.
|
1481 |
+
|
1482 |
+
This methods also includes handling of the datetime64[ns] and
|
1483 |
+
datetime64[ns, TZ] objects.
|
1484 |
+
|
1485 |
+
If no dtype can be found, we return ``object``.
|
1486 |
+
|
1487 |
+
Parameters
|
1488 |
+
----------
|
1489 |
+
dtype : dtype, type
|
1490 |
+
The dtype object whose numpy dtype.type-style
|
1491 |
+
object we want to extract.
|
1492 |
+
|
1493 |
+
Returns
|
1494 |
+
-------
|
1495 |
+
type
|
1496 |
+
"""
|
1497 |
+
if isinstance(dtype, type) and issubclass(dtype, np.generic):
|
1498 |
+
# Type object from a dtype
|
1499 |
+
|
1500 |
+
return dtype
|
1501 |
+
elif isinstance(dtype, (np.dtype, ExtensionDtype)):
|
1502 |
+
# dtype object
|
1503 |
+
try:
|
1504 |
+
_validate_date_like_dtype(dtype)
|
1505 |
+
except TypeError:
|
1506 |
+
# Should still pass if we don't have a date-like
|
1507 |
+
pass
|
1508 |
+
if hasattr(dtype, "numpy_dtype"):
|
1509 |
+
# TODO: Implement this properly
|
1510 |
+
# https://github.com/pandas-dev/pandas/issues/52576
|
1511 |
+
return dtype.numpy_dtype.type
|
1512 |
+
return dtype.type
|
1513 |
+
|
1514 |
+
try:
|
1515 |
+
dtype = pandas_dtype(dtype)
|
1516 |
+
except TypeError:
|
1517 |
+
pass
|
1518 |
+
|
1519 |
+
if isinstance(dtype, ExtensionDtype):
|
1520 |
+
return dtype.type
|
1521 |
+
elif isinstance(dtype, str):
|
1522 |
+
# TODO(jreback)
|
1523 |
+
# should deprecate these
|
1524 |
+
if dtype in ["datetimetz", "datetime64tz"]:
|
1525 |
+
return DatetimeTZDtype.type
|
1526 |
+
elif dtype in ["period"]:
|
1527 |
+
raise NotImplementedError
|
1528 |
+
|
1529 |
+
if dtype in ["datetime", "timedelta"]:
|
1530 |
+
dtype += "64"
|
1531 |
+
try:
|
1532 |
+
return infer_dtype_from_object(getattr(np, dtype))
|
1533 |
+
except (AttributeError, TypeError):
|
1534 |
+
# Handles cases like _get_dtype(int) i.e.,
|
1535 |
+
# Python objects that are valid dtypes
|
1536 |
+
# (unlike user-defined types, in general)
|
1537 |
+
#
|
1538 |
+
# TypeError handles the float16 type code of 'e'
|
1539 |
+
# further handle internal types
|
1540 |
+
pass
|
1541 |
+
|
1542 |
+
return infer_dtype_from_object(np.dtype(dtype))
|
1543 |
+
|
1544 |
+
|
1545 |
+
def _validate_date_like_dtype(dtype) -> None:
|
1546 |
+
"""
|
1547 |
+
Check whether the dtype is a date-like dtype. Raises an error if invalid.
|
1548 |
+
|
1549 |
+
Parameters
|
1550 |
+
----------
|
1551 |
+
dtype : dtype, type
|
1552 |
+
The dtype to check.
|
1553 |
+
|
1554 |
+
Raises
|
1555 |
+
------
|
1556 |
+
TypeError : The dtype could not be casted to a date-like dtype.
|
1557 |
+
ValueError : The dtype is an illegal date-like dtype (e.g. the
|
1558 |
+
frequency provided is too specific)
|
1559 |
+
"""
|
1560 |
+
try:
|
1561 |
+
typ = np.datetime_data(dtype)[0]
|
1562 |
+
except ValueError as e:
|
1563 |
+
raise TypeError(e) from e
|
1564 |
+
if typ not in ["generic", "ns"]:
|
1565 |
+
raise ValueError(
|
1566 |
+
f"{repr(dtype.name)} is too specific of a frequency, "
|
1567 |
+
f"try passing {repr(dtype.type.__name__)}"
|
1568 |
+
)
|
1569 |
+
|
1570 |
+
|
def validate_all_hashable(*args, error_name: str | None = None) -> None:
    """
    Return None if all args are hashable, else raise a TypeError.

    Parameters
    ----------
    *args
        Arguments to validate.
    error_name : str, optional
        The name to use in the error message, if one is raised.

    Raises
    ------
    TypeError : If an argument is not hashable

    Returns
    -------
    None
    """
    if not all(is_hashable(arg) for arg in args):
        if error_name:
            raise TypeError(f"{error_name} must be a hashable type")
        raise TypeError("All elements must be hashable")

def pandas_dtype(dtype) -> DtypeObj:
    """
    Convert input into a pandas only dtype object or a numpy dtype object.

    Parameters
    ----------
    dtype : object to be converted

    Returns
    -------
    np.dtype or a pandas dtype

    Raises
    ------
    TypeError if not a dtype

    Examples
    --------
    >>> pd.api.types.pandas_dtype(int)
    dtype('int64')
    """
    # short-circuit
    if isinstance(dtype, np.ndarray):
        return dtype.dtype
    elif isinstance(dtype, (np.dtype, ExtensionDtype)):
        return dtype

    # registered extension types
    result = registry.find(dtype)
    if result is not None:
        if isinstance(result, type):
            # GH 31356, GH 54592
            warnings.warn(
                f"Instantiating {result.__name__} without any arguments. "
                f"Pass a {result.__name__} instance to silence this warning.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
            result = result()
        return result

    # try a numpy dtype
    # raise a consistent TypeError if failed
    try:
        with warnings.catch_warnings():
            # GH#51523 - Series.astype(np.integer) doesn't show
            # numpy deprecation warning of np.integer
            # Hence enabling DeprecationWarning
            warnings.simplefilter("always", DeprecationWarning)
            npdtype = np.dtype(dtype)
    except SyntaxError as err:
        # np.dtype uses `eval` which can raise SyntaxError
        raise TypeError(f"data type '{dtype}' not understood") from err

    # Any invalid dtype (such as pd.Timestamp) should raise an error.
    # np.dtype(invalid_type).kind = 0 for such objects. However, this will
    # also catch some valid dtypes such as object, np.object_ and 'object'
    # which we safeguard against by catching them earlier and returning
    # np.dtype(valid_dtype) before this condition is evaluated.
    if is_hashable(dtype) and dtype in [
        object,
        np.object_,
        "object",
        "O",
        "object_",
    ]:
        # check hashability to avoid errors/DeprecationWarning when we get
        # here and `dtype` is an array
        return npdtype
    elif npdtype.kind == "O":
        raise TypeError(f"dtype '{dtype}' not understood")

    return npdtype

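# Editor's note (not part of the upstream file): pandas_dtype resolves
# registered extension strings before falling back to np.dtype, so both
# numpy and pandas dtype spellings round-trip:
#
#     >>> pandas_dtype("int64")
#     dtype('int64')
#     >>> isinstance(pandas_dtype("category"), CategoricalDtype)
#     True
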
def is_all_strings(value: ArrayLike) -> bool:
    """
    Check if this is an array of strings that we should try parsing.

    Includes object-dtype ndarray containing all-strings, StringArray,
    and Categorical with all-string categories.
    Does not include numpy string dtypes.
    """
    dtype = value.dtype

    if isinstance(dtype, np.dtype):
        if len(value) == 0:
            return dtype == np.dtype("object")
        else:
            return dtype == np.dtype("object") and lib.is_string_array(
                np.asarray(value), skipna=False
            )
    elif isinstance(dtype, CategoricalDtype):
        return dtype.categories.inferred_type == "string"
    return dtype == "string"

__all__ = [
    "classes",
    "DT64NS_DTYPE",
    "ensure_float64",
    "ensure_python_int",
    "ensure_str",
    "infer_dtype_from_object",
    "INT64_DTYPE",
    "is_1d_only_ea_dtype",
    "is_all_strings",
    "is_any_real_numeric_dtype",
    "is_array_like",
    "is_bool",
    "is_bool_dtype",
    "is_categorical_dtype",
    "is_complex",
    "is_complex_dtype",
    "is_dataclass",
    "is_datetime64_any_dtype",
    "is_datetime64_dtype",
    "is_datetime64_ns_dtype",
    "is_datetime64tz_dtype",
    "is_decimal",
    "is_dict_like",
    "is_dtype_equal",
    "is_ea_or_datetimelike_dtype",
    "is_extension_array_dtype",
    "is_file_like",
    "is_float_dtype",
    "is_int64_dtype",
    "is_integer_dtype",
    "is_interval",
    "is_interval_dtype",
    "is_iterator",
    "is_named_tuple",
    "is_nested_list_like",
    "is_number",
    "is_numeric_dtype",
    "is_object_dtype",
    "is_period_dtype",
    "is_re",
    "is_re_compilable",
    "is_scipy_sparse",
    "is_sequence",
    "is_signed_integer_dtype",
    "is_sparse",
    "is_string_dtype",
    "is_string_or_object_np_dtype",
    "is_timedelta64_dtype",
    "is_timedelta64_ns_dtype",
    "is_unsigned_integer_dtype",
    "needs_i8_conversion",
    "pandas_dtype",
    "TD64NS_DTYPE",
    "validate_all_hashable",
]
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/concat.py
ADDED
@@ -0,0 +1,348 @@
+"""
+Utility functions related to concat.
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    cast,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import lib
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.astype import astype_array
+from pandas.core.dtypes.cast import (
+    common_dtype_categorical_compat,
+    find_common_type,
+    np_find_common_type,
+)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas.core.dtypes.generic import (
+    ABCCategoricalIndex,
+    ABCSeries,
+)
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    from pandas._typing import (
+        ArrayLike,
+        AxisInt,
+        DtypeObj,
+    )
+
+    from pandas.core.arrays import (
+        Categorical,
+        ExtensionArray,
+    )
+
+
+def _is_nonempty(x, axis) -> bool:
+    # filter empty arrays
+    # 1-d dtypes always are included here
+    if x.ndim <= axis:
+        return True
+    return x.shape[axis] > 0
+
+
+def concat_compat(
+    to_concat: Sequence[ArrayLike], axis: AxisInt = 0, ea_compat_axis: bool = False
+) -> ArrayLike:
+    """
+    provide concatenation of an array of arrays each of which is a single
+    'normalized' dtypes (in that for example, if it's object, then it is a
+    non-datetimelike and provide a combined dtype for the resulting array that
+    preserves the overall dtype if possible)
+
+    Parameters
+    ----------
+    to_concat : sequence of arrays
+    axis : axis to provide concatenation
+    ea_compat_axis : bool, default False
+        For ExtensionArray compat, behave as if axis == 1 when determining
+        whether to drop empty arrays.
+
+    Returns
+    -------
+    a single array, preserving the combined dtypes
+    """
+    if len(to_concat) and lib.dtypes_all_equal([obj.dtype for obj in to_concat]):
+        # fastpath!
+        obj = to_concat[0]
+        if isinstance(obj, np.ndarray):
+            to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)
+            return np.concatenate(to_concat_arrs, axis=axis)
+
+        to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)
+        if ea_compat_axis:
+            # We have 1D objects, that don't support axis keyword
+            return obj._concat_same_type(to_concat_eas)
+        elif axis == 0:
+            return obj._concat_same_type(to_concat_eas)
+        else:
+            # e.g. DatetimeArray
+            # NB: We are assuming here that ensure_wrapped_if_arraylike has
+            #  been called where relevant.
+            return obj._concat_same_type(
+                # error: Unexpected keyword argument "axis" for "_concat_same_type"
+                # of "ExtensionArray"
+                to_concat_eas,
+                axis=axis,  # type: ignore[call-arg]
+            )
+
+    # If all arrays are empty, there's nothing to convert, just short-cut to
+    # the concatenation, #3121.
+    #
+    # Creating an empty array directly is tempting, but the winnings would be
+    # marginal given that it would still require shape & dtype calculation and
+    # np.concatenate which has them both implemented is compiled.
+    orig = to_concat
+    non_empties = [x for x in to_concat if _is_nonempty(x, axis)]
+    if non_empties and axis == 0 and not ea_compat_axis:
+        # ea_compat_axis see GH#39574
+        to_concat = non_empties
+
+    any_ea, kinds, target_dtype = _get_result_dtype(to_concat, non_empties)
+
+    if len(to_concat) < len(orig):
+        _, _, alt_dtype = _get_result_dtype(orig, non_empties)
+        if alt_dtype != target_dtype:
+            # GH#39122
+            warnings.warn(
+                "The behavior of array concatenation with empty entries is "
+                "deprecated. In a future version, this will no longer exclude "
+                "empty items when determining the result dtype. "
+                "To retain the old behavior, exclude the empty entries before "
+                "the concat operation.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+
+    if target_dtype is not None:
+        to_concat = [astype_array(arr, target_dtype, copy=False) for arr in to_concat]
+
+    if not isinstance(to_concat[0], np.ndarray):
+        # i.e. isinstance(to_concat[0], ExtensionArray)
+        to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)
+        cls = type(to_concat[0])
+        # GH#53640: eg. for datetime array, axis=1 but 0 is default
+        # However, class method `_concat_same_type()` for some classes
+        # may not support the `axis` keyword
+        if ea_compat_axis or axis == 0:
+            return cls._concat_same_type(to_concat_eas)
+        else:
+            return cls._concat_same_type(
+                to_concat_eas,
+                axis=axis,  # type: ignore[call-arg]
+            )
+    else:
+        to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)
+        result = np.concatenate(to_concat_arrs, axis=axis)
+
+        if not any_ea and "b" in kinds and result.dtype.kind in "iuf":
+            # GH#39817 cast to object instead of casting bools to numeric
+            result = result.astype(object, copy=False)
+    return result
+
+
+def _get_result_dtype(
+    to_concat: Sequence[ArrayLike], non_empties: Sequence[ArrayLike]
+) -> tuple[bool, set[str], DtypeObj | None]:
+    target_dtype = None
+
+    dtypes = {obj.dtype for obj in to_concat}
+    kinds = {obj.dtype.kind for obj in to_concat}
+
+    any_ea = any(not isinstance(x, np.ndarray) for x in to_concat)
+    if any_ea:
+        # i.e. any ExtensionArrays
+
+        # we ignore axis here, as internally concatting with EAs is always
+        # for axis=0
+        if len(dtypes) != 1:
+            target_dtype = find_common_type([x.dtype for x in to_concat])
+            target_dtype = common_dtype_categorical_compat(to_concat, target_dtype)
+
+    elif not len(non_empties):
+        # we have all empties, but may need to coerce the result dtype to
+        # object if we have non-numeric type operands (numpy would otherwise
+        # cast this to float)
+        if len(kinds) != 1:
+            if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}):
+                # let numpy coerce
+                pass
+            else:
+                # coerce to object
+                target_dtype = np.dtype(object)
+                kinds = {"o"}
+    else:
+        # error: Argument 1 to "np_find_common_type" has incompatible type
+        # "*Set[Union[ExtensionDtype, Any]]"; expected "dtype[Any]"
+        target_dtype = np_find_common_type(*dtypes)  # type: ignore[arg-type]
+
+    return any_ea, kinds, target_dtype
+
+
+def union_categoricals(
+    to_union, sort_categories: bool = False, ignore_order: bool = False
+) -> Categorical:
+    """
+    Combine list-like of Categorical-like, unioning categories.
+
+    All categories must have the same dtype.
+
+    Parameters
+    ----------
+    to_union : list-like
+        Categorical, CategoricalIndex, or Series with dtype='category'.
+    sort_categories : bool, default False
+        If true, resulting categories will be lexsorted, otherwise
+        they will be ordered as they appear in the data.
+    ignore_order : bool, default False
+        If true, the ordered attribute of the Categoricals will be ignored.
+        Results in an unordered categorical.
+
+    Returns
+    -------
+    Categorical
+
+    Raises
+    ------
+    TypeError
+        - all inputs do not have the same dtype
+        - all inputs do not have the same ordered property
+        - all inputs are ordered and their categories are not identical
+        - sort_categories=True and Categoricals are ordered
+    ValueError
+        Empty list of categoricals passed
+
+    Notes
+    -----
+    To learn more about categories, see `link
+    <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#unioning>`__
+
+    Examples
+    --------
+    If you want to combine categoricals that do not necessarily have
+    the same categories, `union_categoricals` will combine a list-like
+    of categoricals. The new categories will be the union of the
+    categories being combined.
+
+    >>> a = pd.Categorical(["b", "c"])
+    >>> b = pd.Categorical(["a", "b"])
+    >>> pd.api.types.union_categoricals([a, b])
+    ['b', 'c', 'a', 'b']
+    Categories (3, object): ['b', 'c', 'a']
+
+    By default, the resulting categories will be ordered as they appear
+    in the `categories` of the data. If you want the categories to be
+    lexsorted, use `sort_categories=True` argument.
+
+    >>> pd.api.types.union_categoricals([a, b], sort_categories=True)
+    ['b', 'c', 'a', 'b']
+    Categories (3, object): ['a', 'b', 'c']
+
+    `union_categoricals` also works with the case of combining two
+    categoricals of the same categories and order information (e.g. what
+    you could also `append` for).
+
+    >>> a = pd.Categorical(["a", "b"], ordered=True)
+    >>> b = pd.Categorical(["a", "b", "a"], ordered=True)
+    >>> pd.api.types.union_categoricals([a, b])
+    ['a', 'b', 'a', 'b', 'a']
+    Categories (2, object): ['a' < 'b']
+
+    Raises `TypeError` because the categories are ordered and not identical.
+
+    >>> a = pd.Categorical(["a", "b"], ordered=True)
+    >>> b = pd.Categorical(["a", "b", "c"], ordered=True)
+    >>> pd.api.types.union_categoricals([a, b])
+    Traceback (most recent call last):
+        ...
+    TypeError: to union ordered Categoricals, all categories must be the same
+
+    Ordered categoricals with different categories or orderings can be
+    combined by using the `ignore_ordered=True` argument.
+
+    >>> a = pd.Categorical(["a", "b", "c"], ordered=True)
+    >>> b = pd.Categorical(["c", "b", "a"], ordered=True)
+    >>> pd.api.types.union_categoricals([a, b], ignore_order=True)
+    ['a', 'b', 'c', 'c', 'b', 'a']
+    Categories (3, object): ['a', 'b', 'c']
+
+    `union_categoricals` also works with a `CategoricalIndex`, or `Series`
+    containing categorical data, but note that the resulting array will
+    always be a plain `Categorical`
+
+    >>> a = pd.Series(["b", "c"], dtype='category')
+    >>> b = pd.Series(["a", "b"], dtype='category')
+    >>> pd.api.types.union_categoricals([a, b])
+    ['b', 'c', 'a', 'b']
+    Categories (3, object): ['b', 'c', 'a']
+    """
+    from pandas import Categorical
+    from pandas.core.arrays.categorical import recode_for_categories
+
+    if len(to_union) == 0:
+        raise ValueError("No Categoricals to union")
+
+    def _maybe_unwrap(x):
+        if isinstance(x, (ABCCategoricalIndex, ABCSeries)):
+            return x._values
+        elif isinstance(x, Categorical):
+            return x
+        else:
+            raise TypeError("all components to combine must be Categorical")
+
+    to_union = [_maybe_unwrap(x) for x in to_union]
+    first = to_union[0]
+
+    if not lib.dtypes_all_equal([obj.categories.dtype for obj in to_union]):
+        raise TypeError("dtype of categories must be the same")
+
+    ordered = False
+    if all(first._categories_match_up_to_permutation(other) for other in to_union[1:]):
+        # identical categories - fastpath
+        categories = first.categories
+        ordered = first.ordered
+
+        all_codes = [first._encode_with_my_categories(x)._codes for x in to_union]
+        new_codes = np.concatenate(all_codes)
+
+        if sort_categories and not ignore_order and ordered:
+            raise TypeError("Cannot use sort_categories=True with ordered Categoricals")
+
+        if sort_categories and not categories.is_monotonic_increasing:
+            categories = categories.sort_values()
+            indexer = categories.get_indexer(first.categories)
+
+            from pandas.core.algorithms import take_nd
+
+            new_codes = take_nd(indexer, new_codes, fill_value=-1)
+    elif ignore_order or all(not c.ordered for c in to_union):
+        # different categories - union and recode
+        cats = first.categories.append([c.categories for c in to_union[1:]])
+        categories = cats.unique()
+        if sort_categories:
+            categories = categories.sort_values()
+
+        new_codes = [
+            recode_for_categories(c.codes, c.categories, categories) for c in to_union
+        ]
+        new_codes = np.concatenate(new_codes)
+    else:
+        # ordered - to show a proper error message
+        if all(c.ordered for c in to_union):
+            msg = "to union ordered Categoricals, all categories must be the same"
+            raise TypeError(msg)
+        raise TypeError("Categorical.ordered must be the same")
+
+    if ignore_order:
+        ordered = False
+
+    dtype = CategoricalDtype(categories=categories, ordered=ordered)
+    return Categorical._simple_new(new_codes, dtype=dtype)
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py
ADDED
@@ -0,0 +1,2348 @@
+"""
+Define extension dtypes.
+"""
+from __future__ import annotations
+
+from datetime import (
+    date,
+    datetime,
+    time,
+    timedelta,
+)
+from decimal import Decimal
+import re
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    cast,
+)
+import warnings
+
+import numpy as np
+import pytz
+
+from pandas._libs import (
+    lib,
+    missing as libmissing,
+)
+from pandas._libs.interval import Interval
+from pandas._libs.properties import cache_readonly
+from pandas._libs.tslibs import (
+    BaseOffset,
+    NaT,
+    NaTType,
+    Period,
+    Timedelta,
+    Timestamp,
+    timezones,
+    to_offset,
+    tz_compare,
+)
+from pandas._libs.tslibs.dtypes import (
+    PeriodDtypeBase,
+    abbrev_to_npy_unit,
+)
+from pandas._libs.tslibs.offsets import BDay
+from pandas.compat import pa_version_under10p1
+from pandas.errors import PerformanceWarning
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.base import (
+    ExtensionDtype,
+    StorageExtensionDtype,
+    register_extension_dtype,
+)
+from pandas.core.dtypes.generic import (
+    ABCCategoricalIndex,
+    ABCIndex,
+    ABCRangeIndex,
+)
+from pandas.core.dtypes.inference import (
+    is_bool,
+    is_list_like,
+)
+
+from pandas.util import capitalize_first_letter
+
+if not pa_version_under10p1:
+    import pyarrow as pa
+
+if TYPE_CHECKING:
+    from collections.abc import MutableMapping
+    from datetime import tzinfo
+
+    import pyarrow as pa  # noqa: TCH004
+
+    from pandas._typing import (
+        Dtype,
+        DtypeObj,
+        IntervalClosedType,
+        Ordered,
+        Self,
+        npt,
+        type_t,
+    )
+
+    from pandas import (
+        Categorical,
+        CategoricalIndex,
+        DatetimeIndex,
+        Index,
+        IntervalIndex,
+        PeriodIndex,
+    )
+    from pandas.core.arrays import (
+        BaseMaskedArray,
+        DatetimeArray,
+        IntervalArray,
+        NumpyExtensionArray,
+        PeriodArray,
+        SparseArray,
+    )
+    from pandas.core.arrays.arrow import ArrowExtensionArray
+
+str_type = str
+
+
+class PandasExtensionDtype(ExtensionDtype):
+    """
+    A np.dtype duck-typed class, suitable for holding a custom dtype.
+
+    THIS IS NOT A REAL NUMPY DTYPE
+    """
+
+    type: Any
+    kind: Any
+    # The Any type annotations above are here only because mypy seems to have a
+    # problem dealing with multiple inheritance from PandasExtensionDtype
+    # and ExtensionDtype's @properties in the subclasses below. The kind and
+    # type variables in those subclasses are explicitly typed below.
+    subdtype = None
+    str: str_type
+    num = 100
+    shape: tuple[int, ...] = ()
+    itemsize = 8
+    base: DtypeObj | None = None
+    isbuiltin = 0
+    isnative = 0
+    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
+
+    def __repr__(self) -> str_type:
+        """
+        Return a string representation for a particular object.
+        """
+        return str(self)
+
+    def __hash__(self) -> int:
+        raise NotImplementedError("sub-classes should implement an __hash__ method")
+
+    def __getstate__(self) -> dict[str_type, Any]:
+        # pickle support; we don't want to pickle the cache
+        return {k: getattr(self, k, None) for k in self._metadata}
+
+    @classmethod
+    def reset_cache(cls) -> None:
+        """clear the cache"""
+        cls._cache_dtypes = {}
+
+
+class CategoricalDtypeType(type):
+    """
+    the type of CategoricalDtype, this metaclass determines subclass ability
+    """
+
+
+@register_extension_dtype
+class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
+    """
+    Type for categorical data with the categories and orderedness.
+
+    Parameters
+    ----------
+    categories : sequence, optional
+        Must be unique, and must not contain any nulls.
+        The categories are stored in an Index,
+        and if an index is provided the dtype of that index will be used.
+    ordered : bool or None, default False
+        Whether or not this categorical is treated as a ordered categorical.
+        None can be used to maintain the ordered value of existing categoricals when
+        used in operations that combine categoricals, e.g. astype, and will resolve to
+        False if there is no existing ordered to maintain.
+
+    Attributes
+    ----------
+    categories
+    ordered
+
+    Methods
+    -------
+    None
+
+    See Also
+    --------
+    Categorical : Represent a categorical variable in classic R / S-plus fashion.
+
+    Notes
+    -----
+    This class is useful for specifying the type of a ``Categorical``
+    independent of the values. See :ref:`categorical.categoricaldtype`
+    for more.
+
+    Examples
+    --------
+    >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
+    >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
+    0      a
+    1      b
+    2      a
+    3    NaN
+    dtype: category
+    Categories (2, object): ['b' < 'a']
+
+    An empty CategoricalDtype with a specific dtype can be created
+    by providing an empty index. As follows,
+
+    >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype
+    dtype('<M8[ns]')
+    """
+
+    # TODO: Document public vs. private API
+    name = "category"
+    type: type[CategoricalDtypeType] = CategoricalDtypeType
+    kind: str_type = "O"
+    str = "|O08"
+    base = np.dtype("O")
+    _metadata = ("categories", "ordered")
+    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
+    _supports_2d = False
+    _can_fast_transpose = False
+
+    def __init__(self, categories=None, ordered: Ordered = False) -> None:
+        self._finalize(categories, ordered, fastpath=False)
+
+    @classmethod
+    def _from_fastpath(
+        cls, categories=None, ordered: bool | None = None
+    ) -> CategoricalDtype:
+        self = cls.__new__(cls)
+        self._finalize(categories, ordered, fastpath=True)
+        return self
+
+    @classmethod
+    def _from_categorical_dtype(
+        cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None
+    ) -> CategoricalDtype:
+        if categories is ordered is None:
+            return dtype
+        if categories is None:
+            categories = dtype.categories
+        if ordered is None:
+            ordered = dtype.ordered
+        return cls(categories, ordered)
+
+    @classmethod
+    def _from_values_or_dtype(
+        cls,
+        values=None,
+        categories=None,
+        ordered: bool | None = None,
+        dtype: Dtype | None = None,
+    ) -> CategoricalDtype:
+        """
+        Construct dtype from the input parameters used in :class:`Categorical`.
+
+        This constructor method specifically does not do the factorization
+        step, if that is needed to find the categories. This constructor may
+        therefore return ``CategoricalDtype(categories=None, ordered=None)``,
+        which may not be useful. Additional steps may therefore have to be
+        taken to create the final dtype.
+
+        The return dtype is specified from the inputs in this prioritized
+        order:
+        1. if dtype is a CategoricalDtype, return dtype
+        2. if dtype is the string 'category', create a CategoricalDtype from
+           the supplied categories and ordered parameters, and return that.
+        3. if values is a categorical, use value.dtype, but override it with
+           categories and ordered if either/both of those are not None.
+        4. if dtype is None and values is not a categorical, construct the
+           dtype from categories and ordered, even if either of those is None.
+
+        Parameters
+        ----------
+        values : list-like, optional
+            The list-like must be 1-dimensional.
+        categories : list-like, optional
+            Categories for the CategoricalDtype.
+        ordered : bool, optional
+            Designating if the categories are ordered.
+        dtype : CategoricalDtype or the string "category", optional
+            If ``CategoricalDtype``, cannot be used together with
+            `categories` or `ordered`.
+
+        Returns
+        -------
+        CategoricalDtype
+
+        Examples
+        --------
+        >>> pd.CategoricalDtype._from_values_or_dtype()
+        CategoricalDtype(categories=None, ordered=None, categories_dtype=None)
+        >>> pd.CategoricalDtype._from_values_or_dtype(
+        ...     categories=['a', 'b'], ordered=True
+        ... )
+        CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)
+        >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)
+        >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)
+        >>> c = pd.Categorical([0, 1], dtype=dtype1)
+        >>> pd.CategoricalDtype._from_values_or_dtype(
+        ...     c, ['x', 'y'], ordered=True, dtype=dtype2
+        ... )
+        Traceback (most recent call last):
+        ...
+        ValueError: Cannot specify `categories` or `ordered` together with
+        `dtype`.
+
+        The supplied dtype takes precedence over values' dtype:
+
+        >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)
+        CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object)
+        """
+
+        if dtype is not None:
+            # The dtype argument takes precedence over values.dtype (if any)
+            if isinstance(dtype, str):
+                if dtype == "category":
+                    if ordered is None and cls.is_dtype(values):
+                        # GH#49309 preserve orderedness
+                        ordered = values.dtype.ordered
+
+                    dtype = CategoricalDtype(categories, ordered)
+                else:
+                    raise ValueError(f"Unknown dtype {repr(dtype)}")
+            elif categories is not None or ordered is not None:
+                raise ValueError(
+                    "Cannot specify `categories` or `ordered` together with `dtype`."
+                )
+            elif not isinstance(dtype, CategoricalDtype):
+                raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}")
+        elif cls.is_dtype(values):
+            # If no "dtype" was passed, use the one from "values", but honor
+            # the "ordered" and "categories" arguments
+            dtype = values.dtype._from_categorical_dtype(
+                values.dtype, categories, ordered
+            )
+        else:
+            # If dtype=None and values is not categorical, create a new dtype.
+            # Note: This could potentially have categories=None and
+            # ordered=None.
+            dtype = CategoricalDtype(categories, ordered)
+
+        return cast(CategoricalDtype, dtype)
+
+    @classmethod
+    def construct_from_string(cls, string: str_type) -> CategoricalDtype:
+        """
+        Construct a CategoricalDtype from a string.
+
+        Parameters
+        ----------
+        string : str
+            Must be the string "category" in order to be successfully constructed.
+
+        Returns
+        -------
+        CategoricalDtype
+            Instance of the dtype.
+
+        Raises
+        ------
+        TypeError
+            If a CategoricalDtype cannot be constructed from the input.
+        """
+        if not isinstance(string, str):
+            raise TypeError(
+                f"'construct_from_string' expects a string, got {type(string)}"
+            )
+        if string != cls.name:
+            raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")
+
+        # need ordered=None to ensure that operations specifying dtype="category" don't
+        # override the ordered value for existing categoricals
+        return cls(ordered=None)
+
+    def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:
+        if ordered is not None:
+            self.validate_ordered(ordered)
+
+        if categories is not None:
+            categories = self.validate_categories(categories, fastpath=fastpath)
+
+        self._categories = categories
+        self._ordered = ordered
+
+    def __setstate__(self, state: MutableMapping[str_type, Any]) -> None:
+        # for pickle compat. __get_state__ is defined in the
+        # PandasExtensionDtype superclass and uses the public properties to
+        # pickle -> need to set the settable private ones here (see GH26067)
+        self._categories = state.pop("categories", None)
+        self._ordered = state.pop("ordered", False)
+
+    def __hash__(self) -> int:
+        # _hash_categories returns a uint64, so use the negative
+        # space for when we have unknown categories to avoid a conflict
+        if self.categories is None:
+            if self.ordered:
+                return -1
+            else:
+                return -2
+        # We *do* want to include the real self.ordered here
+        return int(self._hash_categories)
+
+    def __eq__(self, other: object) -> bool:
+        """
+        Rules for CDT equality:
+        1) Any CDT is equal to the string 'category'
+        2) Any CDT is equal to itself
+        3) Any CDT is equal to a CDT with categories=None regardless of ordered
+        4) A CDT with ordered=True is only equal to another CDT with
+           ordered=True and identical categories in the same order
+        5) A CDT with ordered={False, None} is only equal to another CDT with
+           ordered={False, None} and identical categories, but same order is
+           not required. There is no distinction between False/None.
+        6) Any other comparison returns False
+        """
+        if isinstance(other, str):
+            return other == self.name
+        elif other is self:
+            return True
+        elif not (hasattr(other, "ordered") and hasattr(other, "categories")):
+            return False
+        elif self.categories is None or other.categories is None:
+            # For non-fully-initialized dtypes, these are only equal to
+            # - the string "category" (handled above)
+            # - other CategoricalDtype with categories=None
+            return self.categories is other.categories
+        elif self.ordered or other.ordered:
+            # At least one has ordered=True; equal if both have ordered=True
+            # and the same values for categories in the same order.
+            return (self.ordered == other.ordered) and self.categories.equals(
+                other.categories
+            )
+        else:
+            # Neither has ordered=True; equal if both have the same categories,
+            # but same order is not necessary. There is no distinction between
+            # ordered=False and ordered=None: CDT(., False) and CDT(., None)
+            # will be equal if they have the same categories.
+            left = self.categories
+            right = other.categories
+
+            # GH#36280 the ordering of checks here is for performance
+            if not left.dtype == right.dtype:
+                return False
+
+            if len(left) != len(right):
+                return False
+
+            if self.categories.equals(other.categories):
+                # Check and see if they happen to be identical categories
+                return True
+
+            if left.dtype != object:
+                # Faster than calculating hash
+                indexer = left.get_indexer(right)
+                # Because left and right have the same length and are unique,
+                # `indexer` not having any -1s implies that there is a
+                # bijection between `left` and `right`.
+                return (indexer != -1).all()
+
+            # With object-dtype we need a comparison that identifies
+            # e.g. int(2) as distinct from float(2)
+            return set(left) == set(right)
+
+    def __repr__(self) -> str_type:
+        if self.categories is None:
+            data = "None"
+            dtype = "None"
+        else:
+            data = self.categories._format_data(name=type(self).__name__)
+            if isinstance(self.categories, ABCRangeIndex):
+                data = str(self.categories._range)
+            data = data.rstrip(", ")
+            dtype = self.categories.dtype
+
+        return (
+            f"CategoricalDtype(categories={data}, ordered={self.ordered}, "
+            f"categories_dtype={dtype})"
+        )
+
+    @cache_readonly
+    def _hash_categories(self) -> int:
+        from pandas.core.util.hashing import (
+            combine_hash_arrays,
+            hash_array,
+            hash_tuples,
+        )
+
+        categories = self.categories
+        ordered = self.ordered
+
+        if len(categories) and isinstance(categories[0], tuple):
+            # assumes if any individual category is a tuple, then all our. ATM
+            # I don't really want to support just some of the categories being
+            # tuples.
+            cat_list = list(categories)  # breaks if a np.array of categories
+            cat_array = hash_tuples(cat_list)
+        else:
+            if categories.dtype == "O" and len({type(x) for x in categories}) != 1:
+                # TODO: hash_array doesn't handle mixed types. It casts
+                # everything to a str first, which means we treat
+                # {'1', '2'} the same as {'1', 2}
+                # find a better solution
+                hashed = hash((tuple(categories), ordered))
+                return hashed
+
+            if DatetimeTZDtype.is_dtype(categories.dtype):
+                # Avoid future warning.
+                categories = categories.view("datetime64[ns]")
+
+            cat_array = hash_array(np.asarray(categories), categorize=False)
+        if ordered:
+            cat_array = np.vstack(
+                [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]
+            )
+        else:
+            cat_array = np.array([cat_array])
+        combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array))
+        return np.bitwise_xor.reduce(combined_hashed)
+
+    @classmethod
+    def construct_array_type(cls) -> type_t[Categorical]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        from pandas import Categorical
+
+        return Categorical
+
+    @staticmethod
+    def validate_ordered(ordered: Ordered) -> None:
+        """
+        Validates that we have a valid ordered parameter. If
+        it is not a boolean, a TypeError will be raised.
+
+        Parameters
+        ----------
+        ordered : object
+            The parameter to be verified.
+
+        Raises
+        ------
+        TypeError
+            If 'ordered' is not a boolean.
+        """
+        if not is_bool(ordered):
+            raise TypeError("'ordered' must either be 'True' or 'False'")
+
+    @staticmethod
+    def validate_categories(categories, fastpath: bool = False) -> Index:
+        """
+        Validates that we have good categories
+
+        Parameters
+        ----------
+        categories : array-like
+        fastpath : bool
+            Whether to skip nan and uniqueness checks
+
+        Returns
+        -------
+        categories : Index
+        """
+        from pandas.core.indexes.base import Index
+
+        if not fastpath and not is_list_like(categories):
+            raise TypeError(
+                f"Parameter 'categories' must be list-like, was {repr(categories)}"
+            )
+        if not isinstance(categories, ABCIndex):
+            categories = Index._with_infer(categories, tupleize_cols=False)
+
+        if not fastpath:
+            if categories.hasnans:
+                raise ValueError("Categorical categories cannot be null")
+
+            if not categories.is_unique:
+                raise ValueError("Categorical categories must be unique")
+
+        if isinstance(categories, ABCCategoricalIndex):
+            categories = categories.categories
+
+        return categories
+
+    def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:
+        """
+        Returns a CategoricalDtype with categories and ordered taken from dtype
+        if specified, otherwise falling back to self if unspecified
+
+        Parameters
+        ----------
+        dtype : CategoricalDtype
+
+        Returns
+        -------
+        new_dtype : CategoricalDtype
+        """
+        if isinstance(dtype, str) and dtype == "category":
+            # dtype='category' should not change anything
+            return self
+        elif not self.is_dtype(dtype):
+            raise ValueError(
+                f"a CategoricalDtype must be passed to perform an update, "
+                f"got {repr(dtype)}"
+            )
+        else:
+            # from here on, dtype is a CategoricalDtype
+            dtype = cast(CategoricalDtype, dtype)
+
+        # update categories/ordered unless they've been explicitly passed as None
+        new_categories = (
+            dtype.categories if dtype.categories is not None else self.categories
+        )
+        new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered
+
+        return CategoricalDtype(new_categories, new_ordered)
+
+    @property
+    def categories(self) -> Index:
+        """
+        An ``Index`` containing the unique categories allowed.
+
+        Examples
+        --------
+        >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
+        >>> cat_type.categories
+        Index(['a', 'b'], dtype='object')
+        """
+        return self._categories
+
+    @property
+    def ordered(self) -> Ordered:
+        """
+        Whether the categories have an ordered relationship.
+
+        Examples
+        --------
+        >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
+        >>> cat_type.ordered
+        True
+
+        >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)
+        >>> cat_type.ordered
+        False
+        """
+        return self._ordered
+
+    @property
+    def _is_boolean(self) -> bool:
+        from pandas.core.dtypes.common import is_bool_dtype
+
+        return is_bool_dtype(self.categories)
+
+    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
+        # check if we have all categorical dtype with identical categories
+        if all(isinstance(x, CategoricalDtype) for x in dtypes):
+            first = dtypes[0]
+            if all(first == other for other in dtypes[1:]):
+                return first
+
+        # special case non-initialized categorical
+        # TODO we should figure out the expected return value in general
+        non_init_cats = [
+            isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes
+        ]
+        if all(non_init_cats):
+            return self
+        elif any(non_init_cats):
+            return None
+
+        # categorical is aware of Sparse -> extract sparse subdtypes
+        dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
+        # extract the categories' dtype
+        non_cat_dtypes = [
+            x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes
+        ]
+        # TODO should categorical always give an answer?
+        from pandas.core.dtypes.cast import find_common_type
+
+        return find_common_type(non_cat_dtypes)
+
+    @cache_readonly
+    def index_class(self) -> type_t[CategoricalIndex]:
+        from pandas import CategoricalIndex
+
+        return CategoricalIndex
+
+
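A short sketch illustrating the `__eq__` rules documented in the class above: unordered dtypes compare by category set, ordered dtypes also require matching category order, and any instance equals the string 'category'. This only exercises the public `pd.CategoricalDtype` constructor:

import pandas as pd

a = pd.CategoricalDtype(["x", "y"], ordered=False)
b = pd.CategoricalDtype(["y", "x"], ordered=False)
c = pd.CategoricalDtype(["x", "y"], ordered=True)
d = pd.CategoricalDtype(["y", "x"], ordered=True)

print(a == "category")  # True: rule 1, any CategoricalDtype equals 'category'
print(a == b)           # True: rule 5, unordered comparison ignores order
print(c == d)           # False: rule 4, ordered requires identical order
print(a == c)           # False: ordered=True only matches ordered=True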
+@register_extension_dtype
+class DatetimeTZDtype(PandasExtensionDtype):
+    """
+    An ExtensionDtype for timezone-aware datetime data.
+
+    **This is not an actual numpy dtype**, but a duck type.
+
+    Parameters
+    ----------
+    unit : str, default "ns"
+        The precision of the datetime data. Currently limited
+        to ``"ns"``.
+    tz : str, int, or datetime.tzinfo
+        The timezone.
+
+    Attributes
+    ----------
+    unit
+    tz
+
+    Methods
+    -------
+    None
+
+    Raises
+    ------
+    ZoneInfoNotFoundError
+        When the requested timezone cannot be found.
+
+    Examples
+    --------
+    >>> from zoneinfo import ZoneInfo
+    >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))
+    datetime64[ns, UTC]
+
+    >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))
+    datetime64[ns, Europe/Paris]
+    """
+
+    type: type[Timestamp] = Timestamp
+    kind: str_type = "M"
+    num = 101
+    _metadata = ("unit", "tz")
+    _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
+    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
+    _supports_2d = True
+    _can_fast_transpose = True
+
+    @property
+    def na_value(self) -> NaTType:
+        return NaT
+
+    @cache_readonly
+    def base(self) -> DtypeObj:  # type: ignore[override]
+        return np.dtype(f"M8[{self.unit}]")
+
+    # error: Signature of "str" incompatible with supertype "PandasExtensionDtype"
+    @cache_readonly
+    def str(self) -> str:  # type: ignore[override]
+        return f"|M8[{self.unit}]"
+
+    def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None:
+        if isinstance(unit, DatetimeTZDtype):
+            # error: "str" has no attribute "tz"
+            unit, tz = unit.unit, unit.tz  # type: ignore[attr-defined]
+
+        if unit != "ns":
+            if isinstance(unit, str) and tz is None:
+                # maybe a string like datetime64[ns, tz], which we support for
+                # now.
+                result = type(self).construct_from_string(unit)
+                unit = result.unit
+                tz = result.tz
+                msg = (
+                    f"Passing a dtype alias like 'datetime64[ns, {tz}]' "
+                    "to DatetimeTZDtype is no longer supported. Use "
+                    "'DatetimeTZDtype.construct_from_string()' instead."
+                )
+                raise ValueError(msg)
+            if unit not in ["s", "ms", "us", "ns"]:
+                raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units")
+
+        if tz:
+            tz = timezones.maybe_get_tz(tz)
+            tz = timezones.tz_standardize(tz)
+        elif tz is not None:
+            raise pytz.UnknownTimeZoneError(tz)
+        if tz is None:
+            raise TypeError("A 'tz' is required.")
+
+        self._unit = unit
+        self._tz = tz
+
+    @cache_readonly
+    def _creso(self) -> int:
+        """
+        The NPY_DATETIMEUNIT corresponding to this dtype's resolution.
+        """
+        return abbrev_to_npy_unit(self.unit)
+
+    @property
+    def unit(self) -> str_type:
+        """
+        The precision of the datetime data.
+
+        Examples
+        --------
+        >>> from zoneinfo import ZoneInfo
+        >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
+        >>> dtype.unit
+        'ns'
+        """
+        return self._unit
+
+    @property
+    def tz(self) -> tzinfo:
+        """
+        The timezone.
+
+        Examples
+        --------
+        >>> from zoneinfo import ZoneInfo
+        >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
+        >>> dtype.tz
+        zoneinfo.ZoneInfo(key='America/Los_Angeles')
+        """
+        return self._tz
+
+    @classmethod
+    def construct_array_type(cls) -> type_t[DatetimeArray]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        from pandas.core.arrays import DatetimeArray
+
+        return DatetimeArray
+
+    @classmethod
+    def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
+        """
+        Construct a DatetimeTZDtype from a string.
+
+        Parameters
+        ----------
+        string : str
+            The string alias for this DatetimeTZDtype.
+            Should be formatted like ``datetime64[ns, <tz>]``,
+            where ``<tz>`` is the timezone name.
+
+        Examples
+        --------
+        >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
+        datetime64[ns, UTC]
+        """
+        if not isinstance(string, str):
+            raise TypeError(
+                f"'construct_from_string' expects a string, got {type(string)}"
+            )
+
+        msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
+        match = cls._match.match(string)
+        if match:
+            d = match.groupdict()
+            try:
+                return cls(unit=d["unit"], tz=d["tz"])
+            except (KeyError, TypeError, ValueError) as err:
+                # KeyError if maybe_get_tz tries and fails to get a
+                # pytz timezone (actually pytz.UnknownTimeZoneError).
+                # TypeError if we pass a nonsense tz;
+                # ValueError if we pass a unit other than "ns"
+                raise TypeError(msg) from err
+        raise TypeError(msg)
+
+    def __str__(self) -> str_type:
+        return f"datetime64[{self.unit}, {self.tz}]"
+
+    @property
+    def name(self) -> str_type:
+        """A string representation of the dtype."""
+        return str(self)
+
+    def __hash__(self) -> int:
+        # make myself hashable
+        # TODO: update this.
+        return hash(str(self))
+
+    def __eq__(self, other: object) -> bool:
+        if isinstance(other, str):
+            if other.startswith("M8["):
+                other = f"datetime64[{other[3:]}"
+            return other == self.name
+
+        return (
+            isinstance(other, DatetimeTZDtype)
+            and self.unit == other.unit
+            and tz_compare(self.tz, other.tz)
+        )
+
+    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:
+        """
+        Construct DatetimeArray from pyarrow Array/ChunkedArray.
+
+        Note: If the units in the pyarrow Array are the same as this
+        DatetimeDtype, then values corresponding to the integer representation
+        of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`)
+        are converted to ``NaT``, regardless of the null indicator in the
+        pyarrow array.
+
+        Parameters
+        ----------
+        array : pyarrow.Array or pyarrow.ChunkedArray
+            The Arrow array to convert to DatetimeArray.
+
+        Returns
+        -------
+        extension array : DatetimeArray
+        """
+        import pyarrow
+
+        from pandas.core.arrays import DatetimeArray
+
+        array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)
+
+        if isinstance(array, pyarrow.Array):
+            np_arr = array.to_numpy(zero_copy_only=False)
+        else:
+            np_arr = array.to_numpy()
+
+        return DatetimeArray._simple_new(np_arr, dtype=self)
+
+    def __setstate__(self, state) -> None:
+        # for pickle compat. __get_state__ is defined in the
+        # PandasExtensionDtype superclass and uses the public properties to
+        # pickle -> need to set the settable private ones here (see GH26067)
+        self._tz = state["tz"]
+        self._unit = state["unit"]
+
+    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
+        if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes):
+            np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]])
+            unit = np.datetime_data(np_dtype)[0]
+            return type(self)(unit=unit, tz=self.tz)
+        return super()._get_common_dtype(dtypes)
+
+    @cache_readonly
+    def index_class(self) -> type_t[DatetimeIndex]:
+        from pandas import DatetimeIndex
+
+        return DatetimeIndex
+
+
|
945 |
+
@register_extension_dtype
class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype):
    """
    An ExtensionDtype for Period data.

    **This is not an actual numpy dtype**, but a duck type.

    Parameters
    ----------
    freq : str or DateOffset
        The frequency of this PeriodDtype.

    Attributes
    ----------
    freq

    Methods
    -------
    None

    Examples
    --------
    >>> pd.PeriodDtype(freq='D')
    period[D]

    >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
    period[M]
    """

    type: type[Period] = Period
    kind: str_type = "O"
    str = "|O08"
    base = np.dtype("O")
    num = 102
    _metadata = ("freq",)
    _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
    # error: Incompatible types in assignment (expression has type
    # "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype"
    # defined the type as "Dict[str, PandasExtensionDtype]")  [assignment]
    _cache_dtypes: dict[BaseOffset, int] = {}  # type: ignore[assignment]
    __hash__ = PeriodDtypeBase.__hash__
    _freq: BaseOffset
    _supports_2d = True
    _can_fast_transpose = True

    def __new__(cls, freq) -> PeriodDtype:  # noqa: PYI034
        """
        Parameters
        ----------
        freq : PeriodDtype, BaseOffset, or string
        """
        if isinstance(freq, PeriodDtype):
            return freq

        if not isinstance(freq, BaseOffset):
            freq = cls._parse_dtype_strict(freq)

        if isinstance(freq, BDay):
            # GH#53446
            # TODO(3.0): enforcing this will close GH#10575
            warnings.warn(
                "PeriodDtype[B] is deprecated and will be removed in a future "
                "version. Use a DatetimeIndex with freq='B' instead",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        try:
            dtype_code = cls._cache_dtypes[freq]
        except KeyError:
            dtype_code = freq._period_dtype_code
            cls._cache_dtypes[freq] = dtype_code
        u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n)
        u._freq = freq
        return u

    def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]:
        return type(self), (self.name,)

    @property
    def freq(self) -> BaseOffset:
        """
        The frequency object of this PeriodDtype.

        Examples
        --------
        >>> dtype = pd.PeriodDtype(freq='D')
        >>> dtype.freq
        <Day>
        """
        return self._freq

    @classmethod
    def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
        if isinstance(freq, str):  # note: freq is already of type str!
            if freq.startswith(("Period[", "period[")):
                m = cls._match.search(freq)
                if m is not None:
                    freq = m.group("freq")

            freq_offset = to_offset(freq, is_period=True)
            if freq_offset is not None:
                return freq_offset

        raise TypeError(
            "PeriodDtype argument should be string or BaseOffset, "
            f"got {type(freq).__name__}"
        )

    @classmethod
    def construct_from_string(cls, string: str_type) -> PeriodDtype:
        """
        Strict construction from a string; raise a TypeError if not
        possible.
        """
        if (
            isinstance(string, str)
            and (string.startswith(("period[", "Period[")))
            or isinstance(string, BaseOffset)
        ):
            # do not parse string like U as period[U]
            # avoid tuple to be regarded as freq
            try:
                return cls(freq=string)
            except ValueError:
                pass
        if isinstance(string, str):
            msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
        else:
            msg = f"'construct_from_string' expects a string, got {type(string)}"
        raise TypeError(msg)

    def __str__(self) -> str_type:
        return self.name

    @property
    def name(self) -> str_type:
        return f"period[{self._freqstr}]"

    @property
    def na_value(self) -> NaTType:
        return NaT

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            return other in [self.name, capitalize_first_letter(self.name)]

        return super().__eq__(other)

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        """
        Return a boolean indicating whether the passed type is an actual
        dtype that we can match (via string or type).
        """
        if isinstance(dtype, str):
            # PeriodDtype can be instantiated from freq string like "U",
            # but doesn't regard freq str like "U" as dtype.
            if dtype.startswith(("period[", "Period[")):
                try:
                    return cls._parse_dtype_strict(dtype) is not None
                except ValueError:
                    return False
            else:
                return False
        return super().is_dtype(dtype)

    @classmethod
    def construct_array_type(cls) -> type_t[PeriodArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import PeriodArray

        return PeriodArray

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:
        """
        Construct PeriodArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays import PeriodArray
        from pandas.core.arrays.arrow._arrow_utils import (
            pyarrow_array_to_numpy_and_mask,
        )

        if isinstance(array, pyarrow.Array):
            chunks = [array]
        else:
            chunks = array.chunks

        results = []
        for arr in chunks:
            data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))
            parr = PeriodArray(data.copy(), dtype=self, copy=False)
            # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";
            # expected type "Union[int, Sequence[int], Sequence[bool], slice]"
            parr[~mask] = NaT  # type: ignore[index]
            results.append(parr)

        if not results:
            return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False)
        return PeriodArray._concat_same_type(results)

    @cache_readonly
    def index_class(self) -> type_t[PeriodIndex]:
        from pandas import PeriodIndex

        return PeriodIndex


@register_extension_dtype
class IntervalDtype(PandasExtensionDtype):
    """
    An ExtensionDtype for Interval data.

    **This is not an actual numpy dtype**, but a duck type.

    Parameters
    ----------
    subtype : str, np.dtype
        The dtype of the Interval bounds.

    Attributes
    ----------
    subtype

    Methods
    -------
    None

    Examples
    --------
    >>> pd.IntervalDtype(subtype='int64', closed='both')
    interval[int64, both]
    """

    name = "interval"
    kind: str_type = "O"
    str = "|O08"
    base = np.dtype("O")
    num = 103
    _metadata = (
        "subtype",
        "closed",
    )

    _match = re.compile(
        r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)"
        r"(, (?P<closed>(right|left|both|neither)))?\]"
    )

    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
    _subtype: None | np.dtype
    _closed: IntervalClosedType | None

    def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:
        from pandas.core.dtypes.common import (
            is_string_dtype,
            pandas_dtype,
        )

        if closed is not None and closed not in {"right", "left", "both", "neither"}:
            raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")

        if isinstance(subtype, IntervalDtype):
            if closed is not None and closed != subtype.closed:
                raise ValueError(
                    "dtype.closed and 'closed' do not match. "
                    "Try IntervalDtype(dtype.subtype, closed) instead."
                )
            self._subtype = subtype._subtype
            self._closed = subtype._closed
        elif subtype is None:
            # we are called as an empty constructor
            # generally for pickle compat
            self._subtype = None
            self._closed = closed
        elif isinstance(subtype, str) and subtype.lower() == "interval":
            self._subtype = None
            self._closed = closed
        else:
            if isinstance(subtype, str):
                m = IntervalDtype._match.search(subtype)
                if m is not None:
                    gd = m.groupdict()
                    subtype = gd["subtype"]
                    if gd.get("closed", None) is not None:
                        if closed is not None:
                            if closed != gd["closed"]:
                                raise ValueError(
                                    "'closed' keyword does not match value "
                                    "specified in dtype string"
                                )
                        closed = gd["closed"]  # type: ignore[assignment]

            try:
                subtype = pandas_dtype(subtype)
            except TypeError as err:
                raise TypeError("could not construct IntervalDtype") from err
            if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
                # GH 19016
                msg = (
                    "category, object, and string subtypes are not supported "
                    "for IntervalDtype"
                )
                raise TypeError(msg)
            self._subtype = subtype
            self._closed = closed

    @cache_readonly
    def _can_hold_na(self) -> bool:
        subtype = self._subtype
        if subtype is None:
            # partially-initialized
            raise NotImplementedError(
                "_can_hold_na is not defined for partially-initialized IntervalDtype"
            )
        if subtype.kind in "iu":
            return False
        return True

    @property
    def closed(self) -> IntervalClosedType:
        return self._closed  # type: ignore[return-value]

    @property
    def subtype(self):
        """
        The dtype of the Interval bounds.

        Examples
        --------
        >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
        >>> dtype.subtype
        dtype('int64')
        """
        return self._subtype

    @classmethod
    def construct_array_type(cls) -> type[IntervalArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import IntervalArray

        return IntervalArray

    @classmethod
    def construct_from_string(cls, string: str_type) -> IntervalDtype:
        """
        Attempt to construct this type from a string; raise a TypeError
        if it's not possible.
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )

        if string.lower() == "interval" or cls._match.search(string) is not None:
            return cls(string)

        msg = (
            f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n"
            "Incorrectly formatted string passed to constructor. "
            "Valid formats include Interval or Interval[dtype] "
            "where dtype is numeric, datetime, or timedelta"
        )
        raise TypeError(msg)

    @property
    def type(self) -> type[Interval]:
        return Interval

    def __str__(self) -> str_type:
        if self.subtype is None:
            return "interval"
        if self.closed is None:
            # Only partially initialized GH#38394
            return f"interval[{self.subtype}]"
        return f"interval[{self.subtype}, {self.closed}]"

    def __hash__(self) -> int:
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other: object) -> bool:
        if isinstance(other, str):
            return other.lower() in (self.name.lower(), str(self).lower())
        elif not isinstance(other, IntervalDtype):
            return False
        elif self.subtype is None or other.subtype is None:
            # None should match any subtype
            return True
        elif self.closed != other.closed:
            return False
        else:
            return self.subtype == other.subtype

    def __setstate__(self, state) -> None:
        # for pickle compat. __get_state__ is defined in the
        #  PandasExtensionDtype superclass and uses the public properties to
        #  pickle -> need to set the settable private ones here (see GH26067)
        self._subtype = state["subtype"]

        # backward-compat older pickles won't have "closed" key
        self._closed = state.pop("closed", None)

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        """
        Return a boolean indicating whether the passed type is an actual
        dtype that we can match (via string or type).
        """
        if isinstance(dtype, str):
            if dtype.lower().startswith("interval"):
                try:
                    return cls.construct_from_string(dtype) is not None
                except (ValueError, TypeError):
                    return False
            else:
                return False
        return super().is_dtype(dtype)

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
        """
        Construct IntervalArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays import IntervalArray

        if isinstance(array, pyarrow.Array):
            chunks = [array]
        else:
            chunks = array.chunks

        results = []
        for arr in chunks:
            if isinstance(arr, pyarrow.ExtensionArray):
                arr = arr.storage
            left = np.asarray(arr.field("left"), dtype=self.subtype)
            right = np.asarray(arr.field("right"), dtype=self.subtype)
            iarr = IntervalArray.from_arrays(left, right, closed=self.closed)
            results.append(iarr)

        if not results:
            return IntervalArray.from_arrays(
                np.array([], dtype=self.subtype),
                np.array([], dtype=self.subtype),
                closed=self.closed,
            )
        return IntervalArray._concat_same_type(results)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        if not all(isinstance(x, IntervalDtype) for x in dtypes):
            return None

        closed = cast("IntervalDtype", dtypes[0]).closed
        if not all(cast("IntervalDtype", x).closed == closed for x in dtypes):
            return np.dtype(object)

        from pandas.core.dtypes.cast import find_common_type

        common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes])
        if common == object:
            return np.dtype(object)
        return IntervalDtype(common, closed=closed)

    @cache_readonly
    def index_class(self) -> type_t[IntervalIndex]:
        from pandas import IntervalIndex

        return IntervalIndex


class NumpyEADtype(ExtensionDtype):
    """
    A Pandas ExtensionDtype for NumPy dtypes.

    This is mostly for internal compatibility, and is not especially
    useful on its own.

    Parameters
    ----------
    dtype : object
        Object to be converted to a NumPy data type object.

    See Also
    --------
    numpy.dtype
    """

    _metadata = ("_dtype",)
    _supports_2d = False
    _can_fast_transpose = False

    def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None:
        if isinstance(dtype, NumpyEADtype):
            # make constructor idempotent
            dtype = dtype.numpy_dtype
        self._dtype = np.dtype(dtype)

    def __repr__(self) -> str:
        return f"NumpyEADtype({repr(self.name)})"

    @property
    def numpy_dtype(self) -> np.dtype:
        """
        The NumPy dtype this NumpyEADtype wraps.
        """
        return self._dtype

    @property
    def name(self) -> str:
        """
        A bit-width name for this data-type.
        """
        return self._dtype.name

    @property
    def type(self) -> type[np.generic]:
        """
        The type object used to instantiate a scalar of this NumPy data-type.
        """
        return self._dtype.type

    @property
    def _is_numeric(self) -> bool:
        # exclude object, str, unicode, void.
        return self.kind in set("biufc")

    @property
    def _is_boolean(self) -> bool:
        return self.kind == "b"

    @classmethod
    def construct_from_string(cls, string: str) -> NumpyEADtype:
        try:
            dtype = np.dtype(string)
        except TypeError as err:
            if not isinstance(string, str):
                msg = f"'construct_from_string' expects a string, got {type(string)}"
            else:
                msg = f"Cannot construct a 'NumpyEADtype' from '{string}'"
            raise TypeError(msg) from err
        return cls(dtype)

    @classmethod
    def construct_array_type(cls) -> type_t[NumpyExtensionArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays import NumpyExtensionArray

        return NumpyExtensionArray

    @property
    def kind(self) -> str:
        """
        A character code (one of 'biufcmMOSUV') identifying the general kind of data.
        """
        return self._dtype.kind

    @property
    def itemsize(self) -> int:
        """
        The element size of this data-type object.
        """
        return self._dtype.itemsize


class BaseMaskedDtype(ExtensionDtype):
    """
    Base class for dtypes for BaseMaskedArray subclasses.
    """

    base = None
    type: type

    @property
    def na_value(self) -> libmissing.NAType:
        return libmissing.NA

    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """Return an instance of our numpy dtype"""
        return np.dtype(self.type)

    @cache_readonly
    def kind(self) -> str:
        return self.numpy_dtype.kind

    @cache_readonly
    def itemsize(self) -> int:
        """Return the number of bytes in this dtype"""
        return self.numpy_dtype.itemsize

    @classmethod
    def construct_array_type(cls) -> type_t[BaseMaskedArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        raise NotImplementedError

    @classmethod
    def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype:
        """
        Construct the MaskedDtype corresponding to the given numpy dtype.
        """
        if dtype.kind == "b":
            from pandas.core.arrays.boolean import BooleanDtype

            return BooleanDtype()
        elif dtype.kind in "iu":
            from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE

            return NUMPY_INT_TO_DTYPE[dtype]
        elif dtype.kind == "f":
            from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE

            return NUMPY_FLOAT_TO_DTYPE[dtype]
        else:
            raise NotImplementedError(dtype)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # We unwrap any masked dtypes, find the common dtype we would use
        #  for that, then re-mask the result.
        from pandas.core.dtypes.cast import find_common_type

        new_dtype = find_common_type(
            [
                dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype
                for dtype in dtypes
            ]
        )
        if not isinstance(new_dtype, np.dtype):
            # If we ever support e.g. Masked[DatetimeArray] then this will change
            return None
        try:
            return type(self).from_numpy_dtype(new_dtype)
        except (KeyError, NotImplementedError):
            return None


@register_extension_dtype
class SparseDtype(ExtensionDtype):
    """
    Dtype for data stored in :class:`SparseArray`.

    This dtype implements the pandas ExtensionDtype interface.

    Parameters
    ----------
    dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
        The dtype of the underlying array storing the non-fill value values.
    fill_value : scalar, optional
        The scalar value not stored in the SparseArray. By default, this
        depends on `dtype`.

        =========== ==========
        dtype       na_value
        =========== ==========
        float       ``np.nan``
        int         ``0``
        bool        ``False``
        datetime64  ``pd.NaT``
        timedelta64 ``pd.NaT``
        =========== ==========

        The default value may be overridden by specifying a `fill_value`.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0))
    >>> ser
    0    1
    1    0
    2    0
    dtype: Sparse[int64, 0]
    >>> ser.sparse.density
    0.3333333333333333
    """

    _is_immutable = True

    # We include `_is_na_fill_value` in the metadata to avoid hash collisions
    # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
    # Without is_na_fill_value in the comparison, those would be equal since
    # hash(nan) is (sometimes?) 0.
    _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")

    def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
        if isinstance(dtype, type(self)):
            if fill_value is None:
                fill_value = dtype.fill_value
            dtype = dtype.subtype

        from pandas.core.dtypes.common import (
            is_string_dtype,
            pandas_dtype,
        )
        from pandas.core.dtypes.missing import na_value_for_dtype

        dtype = pandas_dtype(dtype)
        if is_string_dtype(dtype):
            dtype = np.dtype("object")
        if not isinstance(dtype, np.dtype):
            # GH#53160
            raise TypeError("SparseDtype subtype must be a numpy dtype")

        if fill_value is None:
            fill_value = na_value_for_dtype(dtype)

        self._dtype = dtype
        self._fill_value = fill_value
        self._check_fill_value()

    def __hash__(self) -> int:
        # Python3 doesn't inherit __hash__ when a base class overrides
        # __eq__, so we explicitly do it here.
        return super().__hash__()

    def __eq__(self, other: object) -> bool:
        # We have to override __eq__ to handle NA values in _metadata.
        # The base class does simple == checks, which fail for NA.
        if isinstance(other, str):
            try:
                other = self.construct_from_string(other)
            except TypeError:
                return False

        if isinstance(other, type(self)):
            subtype = self.subtype == other.subtype
            if self._is_na_fill_value:
                # this case is complicated by two things:
                # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
                # SparseDtype(float, np.nan)     != SparseDtype(float, pd.NaT)
                # i.e. we want to treat any floating-point NaN as equal, but
                # not a floating-point NaN and a datetime NaT.
                fill_value = (
                    other._is_na_fill_value
                    and isinstance(self.fill_value, type(other.fill_value))
                    or isinstance(other.fill_value, type(self.fill_value))
                )
            else:
                with warnings.catch_warnings():
                    # Ignore spurious numpy warning
                    warnings.filterwarnings(
                        "ignore",
                        "elementwise comparison failed",
                        category=DeprecationWarning,
                    )

                    fill_value = self.fill_value == other.fill_value

            return subtype and fill_value
        return False

    @property
    def fill_value(self):
        """
        The fill value of the array.

        Converting the SparseArray to a dense ndarray will fill the
        array with this value.

        .. warning::

           It's possible to end up with a SparseArray that has ``fill_value``
           values in ``sp_values``. This can occur, for example, when setting
           ``SparseArray.fill_value`` directly.
        """
        return self._fill_value

    def _check_fill_value(self) -> None:
        if not lib.is_scalar(self._fill_value):
            raise ValueError(
                f"fill_value must be a scalar. Got {self._fill_value} instead"
            )

        from pandas.core.dtypes.cast import can_hold_element
        from pandas.core.dtypes.missing import (
            is_valid_na_for_dtype,
            isna,
        )

        from pandas.core.construction import ensure_wrapped_if_datetimelike

        # GH#23124 require fill_value and subtype to match
        val = self._fill_value
        if isna(val):
            if not is_valid_na_for_dtype(val, self.subtype):
                warnings.warn(
                    "Allowing arbitrary scalar fill_value in SparseDtype is "
                    "deprecated. In a future version, the fill_value must be "
                    "a valid value for the SparseDtype.subtype.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
        else:
            dummy = np.empty(0, dtype=self.subtype)
            dummy = ensure_wrapped_if_datetimelike(dummy)

            if not can_hold_element(dummy, val):
                warnings.warn(
                    "Allowing arbitrary scalar fill_value in SparseDtype is "
                    "deprecated. In a future version, the fill_value must be "
                    "a valid value for the SparseDtype.subtype.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )

    @property
    def _is_na_fill_value(self) -> bool:
        from pandas import isna

        return isna(self.fill_value)

    @property
    def _is_numeric(self) -> bool:
        return not self.subtype == object

    @property
    def _is_boolean(self) -> bool:
        return self.subtype.kind == "b"

    @property
    def kind(self) -> str:
        """
        The kind character of the underlying ``subtype`` dtype.
        """
        return self.subtype.kind

    @property
    def type(self):
        return self.subtype.type

    @property
    def subtype(self):
        return self._dtype

    @property
    def name(self) -> str:
        return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"

    def __repr__(self) -> str:
        return self.name

    @classmethod
    def construct_array_type(cls) -> type_t[SparseArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays.sparse.array import SparseArray

        return SparseArray

    @classmethod
    def construct_from_string(cls, string: str) -> SparseDtype:
        """
        Construct a SparseDtype from a string form.

        Parameters
        ----------
        string : str
            Can take the following forms.

            string           dtype
            ================ ============================
            'int'            SparseDtype[np.int64, 0]
            'Sparse'         SparseDtype[np.float64, nan]
            'Sparse[int]'    SparseDtype[np.int64, 0]
            'Sparse[int, 0]' SparseDtype[np.int64, 0]
            ================ ============================

            It is not possible to specify non-default fill values
            with a string. An argument like ``'Sparse[int, 1]'``
            will raise a ``TypeError`` because the default fill value
            for integers is 0.

        Returns
        -------
        SparseDtype
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        msg = f"Cannot construct a 'SparseDtype' from '{string}'"
        if string.startswith("Sparse"):
            try:
                sub_type, has_fill_value = cls._parse_subtype(string)
            except ValueError as err:
                raise TypeError(msg) from err
            else:
                result = SparseDtype(sub_type)
                msg = (
                    f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
                    "looks like the fill_value in the string is not "
                    "the default for the dtype. Non-default fill_values "
                    "are not supported. Use the 'SparseDtype()' "
                    "constructor instead."
                )
                if has_fill_value and str(result) != string:
                    raise TypeError(msg)
                return result
        else:
            raise TypeError(msg)

    @staticmethod
    def _parse_subtype(dtype: str) -> tuple[str, bool]:
        """
        Parse a string to get the subtype

        Parameters
        ----------
        dtype : str
            A string like

            * Sparse[subtype]
            * Sparse[subtype, fill_value]

        Returns
        -------
        subtype : str

        Raises
        ------
        ValueError
            When the subtype cannot be extracted.
        """
        xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
        m = xpr.match(dtype)
        has_fill_value = False
        if m:
            subtype = m.groupdict()["subtype"]
            has_fill_value = bool(m.groupdict()["fill_value"])
        elif dtype == "Sparse":
            subtype = "float64"
        else:
            raise ValueError(f"Cannot parse {dtype}")
        return subtype, has_fill_value

    @classmethod
    def is_dtype(cls, dtype: object) -> bool:
        dtype = getattr(dtype, "dtype", dtype)
        if isinstance(dtype, str) and dtype.startswith("Sparse"):
            sub_type, _ = cls._parse_subtype(dtype)
            dtype = np.dtype(sub_type)
        elif isinstance(dtype, cls):
            return True
        return isinstance(dtype, np.dtype) or dtype == "Sparse"

    def update_dtype(self, dtype) -> SparseDtype:
        """
        Convert the SparseDtype to a new dtype.

        This takes care of converting the ``fill_value``.

        Parameters
        ----------
        dtype : Union[str, numpy.dtype, SparseDtype]
            The new dtype to use.

            * For a SparseDtype, it is simply returned
            * For a NumPy dtype (or str), the current fill value
              is converted to the new dtype, and a SparseDtype
              with `dtype` and the new fill value is returned.

        Returns
        -------
        SparseDtype
            A new SparseDtype with the correct `dtype` and fill value
            for that `dtype`.

        Raises
        ------
        ValueError
            When the current fill value cannot be converted to the
            new `dtype` (e.g. trying to convert ``np.nan`` to an
            integer dtype).


        Examples
        --------
        >>> SparseDtype(int, 0).update_dtype(float)
        Sparse[float64, 0.0]

        >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
        Sparse[float64, nan]
        """
        from pandas.core.dtypes.astype import astype_array
        from pandas.core.dtypes.common import pandas_dtype

        cls = type(self)
        dtype = pandas_dtype(dtype)

        if not isinstance(dtype, cls):
            if not isinstance(dtype, np.dtype):
                raise TypeError("sparse arrays of extension dtypes not supported")

            fv_asarray = np.atleast_1d(np.array(self.fill_value))
            fvarr = astype_array(fv_asarray, dtype)
            # NB: not fv_0d.item(), as that casts dt64->int
            fill_value = fvarr[0]
            dtype = cls(dtype, fill_value=fill_value)

        return dtype

    @property
    def _subtype_with_str(self):
        """
        Whether the SparseDtype's subtype should be considered ``str``.

        Typically, pandas will store string data in an object-dtype array.
        When converting values to a dtype, e.g. in ``.astype``, we need to
        be more specific, we need the actual underlying type.

        Returns
        -------
        >>> SparseDtype(int, 1)._subtype_with_str
        dtype('int64')

        >>> SparseDtype(object, 1)._subtype_with_str
        dtype('O')

        >>> dtype = SparseDtype(str, '')
        >>> dtype.subtype
        dtype('O')

        >>> dtype._subtype_with_str
        <class 'str'>
        """
        if isinstance(self.fill_value, str):
            return type(self.fill_value)
        return self.subtype

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # TODO for now only handle SparseDtypes and numpy dtypes => extend
        # with other compatible extension dtypes
        from pandas.core.dtypes.cast import np_find_common_type

        if any(
            isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
            for x in dtypes
        ):
            return None

        fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
        fill_value = fill_values[0]

        from pandas import isna

        # np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the all NA case too.
        if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
            warnings.warn(
                "Concatenating sparse arrays with multiple fill "
                f"values: '{fill_values}'. Picking the first and "
                "converting the rest.",
                PerformanceWarning,
                stacklevel=find_stack_level(),
            )

        np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes)
        return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value)


@register_extension_dtype
class ArrowDtype(StorageExtensionDtype):
    """
    An ExtensionDtype for PyArrow data types.

    .. warning::

       ArrowDtype is considered experimental. The implementation and
       parts of the API may change without warning.

    While most ``dtype`` arguments can accept the "string"
    constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful
    if the data type contains parameters like ``pyarrow.timestamp``.

    Parameters
    ----------
    pyarrow_dtype : pa.DataType
        An instance of a `pyarrow.DataType <https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions>`__.

    Attributes
    ----------
    pyarrow_dtype

    Methods
    -------
    None

    Returns
    -------
    ArrowDtype

    Examples
    --------
    >>> import pyarrow as pa
    >>> pd.ArrowDtype(pa.int64())
    int64[pyarrow]

    Types with parameters must be constructed with ArrowDtype.

    >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))
    timestamp[s, tz=America/New_York][pyarrow]
    >>> pd.ArrowDtype(pa.list_(pa.int64()))
    list<item: int64>[pyarrow]
    """

    _metadata = ("storage", "pyarrow_dtype")  # type: ignore[assignment]

    def __init__(self, pyarrow_dtype: pa.DataType) -> None:
        super().__init__("pyarrow")
        if pa_version_under10p1:
            raise ImportError("pyarrow>=10.0.1 is required for ArrowDtype")
        if not isinstance(pyarrow_dtype, pa.DataType):
            raise ValueError(
                f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "
                f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead."
            )
        self.pyarrow_dtype = pyarrow_dtype

    def __repr__(self) -> str:
        return self.name

    def __hash__(self) -> int:
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, type(self)):
            return super().__eq__(other)
        return self.pyarrow_dtype == other.pyarrow_dtype

    @property
    def type(self):
        """
        Returns associated scalar type.
        """
        pa_type = self.pyarrow_dtype
        if pa.types.is_integer(pa_type):
            return int
        elif pa.types.is_floating(pa_type):
            return float
        elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type):
            return str
        elif (
            pa.types.is_binary(pa_type)
            or pa.types.is_fixed_size_binary(pa_type)
            or pa.types.is_large_binary(pa_type)
        ):
            return bytes
        elif pa.types.is_boolean(pa_type):
            return bool
        elif pa.types.is_duration(pa_type):
            if pa_type.unit == "ns":
                return Timedelta
            else:
                return timedelta
        elif pa.types.is_timestamp(pa_type):
            if pa_type.unit == "ns":
                return Timestamp
            else:
                return datetime
        elif pa.types.is_date(pa_type):
            return date
        elif pa.types.is_time(pa_type):
            return time
        elif pa.types.is_decimal(pa_type):
            return Decimal
        elif pa.types.is_dictionary(pa_type):
            # TODO: Potentially change this & CategoricalDtype.type to
            #  something more representative of the scalar
            return CategoricalDtypeType
        elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type):
            return list
        elif pa.types.is_fixed_size_list(pa_type):
            return list
        elif pa.types.is_map(pa_type):
            return list
        elif pa.types.is_struct(pa_type):
            return dict
        elif pa.types.is_null(pa_type):
            # TODO: None? pd.NA? pa.null?
            return type(pa_type)
        elif isinstance(pa_type, pa.ExtensionType):
            return type(self)(pa_type.storage_type).type
        raise NotImplementedError(pa_type)

    @property
    def name(self) -> str:  # type: ignore[override]
        """
        A string identifying the data type.
        """
        return f"{str(self.pyarrow_dtype)}[{self.storage}]"

    @cache_readonly
    def numpy_dtype(self) -> np.dtype:
        """Return an instance of the related numpy dtype"""
        if pa.types.is_timestamp(self.pyarrow_dtype):
            # pa.timestamp(unit).to_pandas_dtype() returns ns units
            # regardless of the pyarrow timestamp units.
            # This can be removed if/when pyarrow addresses it:
            # https://github.com/apache/arrow/issues/34462
            return np.dtype(f"datetime64[{self.pyarrow_dtype.unit}]")
        if pa.types.is_duration(self.pyarrow_dtype):
            # pa.duration(unit).to_pandas_dtype() returns ns units
            # regardless of the pyarrow duration units
            # This can be removed if/when pyarrow addresses it:
            # https://github.com/apache/arrow/issues/34462
            return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]")
        if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(
            self.pyarrow_dtype
        ):
            # pa.string().to_pandas_dtype() = object which we don't want
            return np.dtype(str)
        try:
            return np.dtype(self.pyarrow_dtype.to_pandas_dtype())
        except (NotImplementedError, TypeError):
            return np.dtype(object)

    @cache_readonly
    def kind(self) -> str:
        if pa.types.is_timestamp(self.pyarrow_dtype):
            # To mirror DatetimeTZDtype
            return "M"
        return self.numpy_dtype.kind

    @cache_readonly
    def itemsize(self) -> int:
        """Return the number of bytes in this dtype"""
        return self.numpy_dtype.itemsize

    @classmethod
    def construct_array_type(cls) -> type_t[ArrowExtensionArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays.arrow import ArrowExtensionArray

        return ArrowExtensionArray

    @classmethod
    def construct_from_string(cls, string: str) -> ArrowDtype:
        """
        Construct this type from a string.

        Parameters
        ----------
        string : str
            string should follow the format f"{pyarrow_type}[pyarrow]"
            e.g. int64[pyarrow]
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        if not string.endswith("[pyarrow]"):
            raise TypeError(f"'{string}' must end with '[pyarrow]'")
        if string == "string[pyarrow]":
            # Ensure Registry.find skips ArrowDtype to use StringDtype instead
            raise TypeError("string[pyarrow] should be constructed by StringDtype")

        base_type = string[:-9]  # get rid of "[pyarrow]"
        try:
            pa_dtype = pa.type_for_alias(base_type)
        except ValueError as err:
            has_parameters = re.search(r"[\[\(].*[\]\)]", base_type)
            if has_parameters:
                # Fallback to try common temporal types
                try:
                    return cls._parse_temporal_dtype_string(base_type)
                except (NotImplementedError, ValueError):
                    # Fall through to raise with nice exception message below
                    pass

                raise NotImplementedError(
                    "Passing pyarrow type specific parameters "
                    f"({has_parameters.group()}) in the string is not supported. "
                    "Please construct an ArrowDtype object with a pyarrow_dtype "
                    "instance with specific parameters."
                ) from err
            raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err
        return cls(pa_dtype)

    # TODO(arrow#33642): This can be removed once supported by pyarrow
    @classmethod
    def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype:
        """
        Construct a temporal ArrowDtype from string.
        """
        # we assume
        #  1) "[pyarrow]" has already been stripped from the end of our string.
        #  2) we know "[" is present
        head, tail = string.split("[", 1)

        if not tail.endswith("]"):
            raise ValueError
        tail = tail[:-1]

        if head == "timestamp":
            assert "," in tail  # otherwise type_for_alias should work
            unit, tz = tail.split(",", 1)
            unit = unit.strip()
            tz = tz.strip()
            if tz.startswith("tz="):
                tz = tz[3:]

            pa_type = pa.timestamp(unit, tz=tz)
            dtype = cls(pa_type)
            return dtype

        raise NotImplementedError(string)

    @property
    def _is_numeric(self) -> bool:
        """
        Whether columns with this dtype should be considered numeric.
        """
        # TODO: pa.types.is_boolean?
        return (
            pa.types.is_integer(self.pyarrow_dtype)
            or pa.types.is_floating(self.pyarrow_dtype)
            or pa.types.is_decimal(self.pyarrow_dtype)
        )

    @property
    def _is_boolean(self) -> bool:
        """
        Whether this dtype should be considered boolean.
        """
        return pa.types.is_boolean(self.pyarrow_dtype)

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # We unwrap any masked dtypes, find the common dtype we would use
        #  for that, then re-mask the result.
        # Mirrors BaseMaskedDtype
        from pandas.core.dtypes.cast import find_common_type

        null_dtype = type(self)(pa.null())

        new_dtype = find_common_type(
            [
                dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype
                for dtype in dtypes
                if dtype != null_dtype
            ]
        )
        if not isinstance(new_dtype, np.dtype):
            return None
        try:
            pa_dtype = pa.from_numpy_dtype(new_dtype)
            return type(self)(pa_dtype)
        except NotImplementedError:
            return None

    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray):
        """
        Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
        """
        array_class = self.construct_array_type()
        arr = array.cast(self.pyarrow_dtype, safe=True)
        return array_class(arr)
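
A short usage sketch may help orient readers of the file above. It exercises only public constructors defined in this dtypes.py (pd.DatetimeTZDtype, pd.PeriodDtype, pd.SparseDtype, pd.ArrowDtype); the asserted reprs come from the docstrings in the listing, and pyarrow is an optional dependency, so the last block only runs where it is installed.

# Usage sketch (not part of the diff): exercising the dtype constructors
# defined in pandas/core/dtypes/dtypes.py above.
import numpy as np
import pandas as pd

# DatetimeTZDtype: also reachable via the "datetime64[ns, <tz>]" alias
# handled by DatetimeTZDtype.construct_from_string
dt_dtype = pd.DatetimeTZDtype(unit="ns", tz="UTC")
assert pd.api.types.pandas_dtype("datetime64[ns, UTC]") == dt_dtype

# PeriodDtype: freq string or offset; the dtype code is cached in __new__
p_dtype = pd.PeriodDtype(freq="D")
assert str(p_dtype) == "period[D]"

# SparseDtype: fill_value defaults to na_value_for_dtype(subtype), i.e. 0 here
s_dtype = pd.SparseDtype(np.int64)
assert s_dtype == "Sparse[int64, 0]"  # string equality goes through __eq__

# ArrowDtype: parametrized types must be built from a pyarrow.DataType
import pyarrow as pa
a_dtype = pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))
assert a_dtype.kind == "M"  # mirrors DatetimeTZDtype per ArrowDtype.kind
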
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/generic.py
ADDED
@@ -0,0 +1,147 @@
""" define generic base classes for pandas objects """
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Type,
    cast,
)

if TYPE_CHECKING:
    from pandas import (
        Categorical,
        CategoricalIndex,
        DataFrame,
        DatetimeIndex,
        Index,
        IntervalIndex,
        MultiIndex,
        PeriodIndex,
        RangeIndex,
        Series,
        TimedeltaIndex,
    )
    from pandas.core.arrays import (
        DatetimeArray,
        ExtensionArray,
        NumpyExtensionArray,
        PeriodArray,
        TimedeltaArray,
    )
    from pandas.core.generic import NDFrame


# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
    def _check(inst) -> bool:
        return getattr(inst, attr, "_typ") in comp

    # https://github.com/python/mypy/issues/1006
    # error: 'classmethod' used with a non-method
    @classmethod  # type: ignore[misc]
    def _instancecheck(cls, inst) -> bool:
        return _check(inst) and not isinstance(inst, type)

    @classmethod  # type: ignore[misc]
    def _subclasscheck(cls, inst) -> bool:
        # Raise instead of returning False
        # This is consistent with default __subclasscheck__ behavior
        if not isinstance(inst, type):
            raise TypeError("issubclass() arg 1 must be a class")

        return _check(inst)

    dct = {"__instancecheck__": _instancecheck, "__subclasscheck__": _subclasscheck}
    meta = type("ABCBase", (type,), dct)
    return meta(name, (), dct)


ABCRangeIndex = cast(
    "Type[RangeIndex]",
    create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",)),
)
ABCMultiIndex = cast(
    "Type[MultiIndex]",
    create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)),
)
ABCDatetimeIndex = cast(
    "Type[DatetimeIndex]",
    create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)),
)
ABCTimedeltaIndex = cast(
    "Type[TimedeltaIndex]",
    create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)),
)
ABCPeriodIndex = cast(
    "Type[PeriodIndex]",
    create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)),
)
ABCCategoricalIndex = cast(
    "Type[CategoricalIndex]",
    create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)),
)
ABCIntervalIndex = cast(
    "Type[IntervalIndex]",
    create_pandas_abc_type("ABCIntervalIndex", "_typ", ("intervalindex",)),
)
ABCIndex = cast(
    "Type[Index]",
    create_pandas_abc_type(
        "ABCIndex",
        "_typ",
        {
            "index",
            "rangeindex",
            "multiindex",
            "datetimeindex",
            "timedeltaindex",
            "periodindex",
            "categoricalindex",
            "intervalindex",
        },
    ),
)


ABCNDFrame = cast(
    "Type[NDFrame]",
    create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")),
)
ABCSeries = cast(
    "Type[Series]",
    create_pandas_abc_type("ABCSeries", "_typ", ("series",)),
)
ABCDataFrame = cast(
    "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
)

ABCCategorical = cast(
    "Type[Categorical]",
    create_pandas_abc_type("ABCCategorical", "_typ", ("categorical")),
)
ABCDatetimeArray = cast(
    "Type[DatetimeArray]",
    create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray")),
)
ABCTimedeltaArray = cast(
    "Type[TimedeltaArray]",
    create_pandas_abc_type("ABCTimedeltaArray", "_typ", ("timedeltaarray")),
)
ABCPeriodArray = cast(
    "Type[PeriodArray]",
    create_pandas_abc_type("ABCPeriodArray", "_typ", ("periodarray",)),
)
ABCExtensionArray = cast(
    "Type[ExtensionArray]",
    create_pandas_abc_type(
        "ABCExtensionArray",
        "_typ",
        # Note: IntervalArray and SparseArray are included bc they have _typ="extension"
        {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"},
    ),
)
ABCNumpyExtensionArray = cast(
    "Type[NumpyExtensionArray]",
    create_pandas_abc_type("ABCNumpyExtensionArray", "_typ", ("npy_extension",)),
)
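
The factory above builds each ABC as an instance of a metaclass whose __instancecheck__/__subclasscheck__ inspect the duck-typed `_typ` attribute, so isinstance checks work without importing the concrete pandas classes. A brief sketch of how these ABCs behave in practice:

# Usage sketch (not part of the diff): the ABC types defined in
# pandas/core/dtypes/generic.py customize isinstance() via a metaclass
# that checks the `_typ` attribute rather than the class hierarchy.
import pandas as pd
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries

ser = pd.Series([1, 2, 3])
df = pd.DataFrame({"a": [1]})

assert isinstance(ser, ABCSeries)        # Series._typ == "series"
assert isinstance(df, ABCDataFrame)      # DataFrame._typ == "dataframe"
assert not isinstance(ser, ABCDataFrame)

# _instancecheck rejects classes themselves (not isinstance(inst, type)):
assert not isinstance(pd.Series, ABCSeries)
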
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/inference.py
ADDED
@@ -0,0 +1,437 @@
""" basic inference routines """

from __future__ import annotations

from collections import abc
from numbers import Number
import re
from re import Pattern
from typing import TYPE_CHECKING

import numpy as np

from pandas._libs import lib

if TYPE_CHECKING:
    from collections.abc import Hashable

    from pandas._typing import TypeGuard

is_bool = lib.is_bool

is_integer = lib.is_integer

is_float = lib.is_float

is_complex = lib.is_complex

is_scalar = lib.is_scalar

is_decimal = lib.is_decimal

is_interval = lib.is_interval

is_list_like = lib.is_list_like

is_iterator = lib.is_iterator


def is_number(obj) -> TypeGuard[Number | np.number]:
    """
    Check if the object is a number.

    Returns True when the object is a number, and False if it is not.

    Parameters
    ----------
    obj : any type
        The object to check.

    Returns
    -------
    bool
        Whether `obj` is a number or not.

    See Also
    --------
    api.types.is_integer: Checks a subgroup of numbers.

    Examples
    --------
    >>> from pandas.api.types import is_number
    >>> is_number(1)
    True
    >>> is_number(7.15)
    True

    Booleans are valid because bool is a subclass of int.

    >>> is_number(False)
    True

    >>> is_number("foo")
    False
    >>> is_number("5")
    False
    """
    return isinstance(obj, (Number, np.number))


def iterable_not_string(obj) -> bool:
    """
    Check if the object is an iterable but not a string.

    Parameters
    ----------
    obj : The object to check.

    Returns
    -------
    is_iter_not_string : bool
        Whether `obj` is a non-string iterable.

    Examples
    --------
    >>> iterable_not_string([1, 2, 3])
    True
    >>> iterable_not_string("foo")
    False
    >>> iterable_not_string(1)
    False
    """
    return isinstance(obj, abc.Iterable) and not isinstance(obj, str)


def is_file_like(obj) -> bool:
    """
    Check if the object is a file-like object.

    For objects to be considered file-like, they must
    be iterable AND have a `read` and/or `write`
    method as an attribute.

    Note: file-like objects must be iterable, but
    iterable objects need not be file-like.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` has file-like properties.

    Examples
    --------
    >>> import io
    >>> from pandas.api.types import is_file_like
    >>> buffer = io.StringIO("data")
    >>> is_file_like(buffer)
    True
    >>> is_file_like([1, 2, 3])
    False
    """
    if not (hasattr(obj, "read") or hasattr(obj, "write")):
        return False

    return bool(hasattr(obj, "__iter__"))


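Because the check above is pure duck typing, any object that exposes `read`
or `write` together with `__iter__` passes, whether or not a real file is
involved. A small sketch (the `RecordingSink` class is hypothetical, not part
of pandas):

from pandas.api.types import is_file_like


class RecordingSink:
    """Not a file, but quacks like one."""

    def __init__(self) -> None:
        self.chunks: list[str] = []

    def write(self, chunk: str) -> None:  # satisfies the read/write gate
        self.chunks.append(chunk)

    def __iter__(self):  # satisfies the iterability gate
        return iter(self.chunks)


print(is_file_like(RecordingSink()))  # True
print(is_file_like("just a string"))  # False: iterable, but no read/write
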
def is_re(obj) -> TypeGuard[Pattern]:
    """
    Check if the object is a regex pattern instance.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` is a regex pattern.

    Examples
    --------
    >>> from pandas.api.types import is_re
    >>> import re
    >>> is_re(re.compile(".*"))
    True
    >>> is_re("foo")
    False
    """
    return isinstance(obj, Pattern)


def is_re_compilable(obj) -> bool:
    """
    Check if the object can be compiled into a regex pattern instance.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` can be compiled as a regex pattern.

    Examples
    --------
    >>> from pandas.api.types import is_re_compilable
    >>> is_re_compilable(".*")
    True
    >>> is_re_compilable(1)
    False
    """
    try:
        re.compile(obj)
    except TypeError:
        return False
    else:
        return True


def is_array_like(obj) -> bool:
    """
    Check if the object is array-like.

    For an object to be considered array-like, it must be list-like and
    have a `dtype` attribute.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_array_like : bool
        Whether `obj` has array-like properties.

    Examples
    --------
    >>> is_array_like(np.array([1, 2, 3]))
    True
    >>> is_array_like(pd.Series(["a", "b"]))
    True
    >>> is_array_like(pd.Index(["2016-01-01"]))
    True
    >>> is_array_like([1, 2, 3])
    False
    >>> is_array_like(("a", "b"))
    False
    """
    return is_list_like(obj) and hasattr(obj, "dtype")


def is_nested_list_like(obj) -> bool:
    """
    Check if the object is list-like, and that all of its elements
    are also list-like.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_list_like : bool
        Whether `obj` has list-like properties.

    Examples
    --------
    >>> is_nested_list_like([[1, 2, 3]])
    True
    >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
    True
    >>> is_nested_list_like(["foo"])
    False
    >>> is_nested_list_like([])
    False
    >>> is_nested_list_like([[1, 2, 3], 1])
    False

    Notes
    -----
    This won't reliably detect whether a consumable iterator (e.g.
    a generator) is a nested-list-like without consuming the iterator.
    To avoid consuming it, we always return False if the outer container
    doesn't define `__len__`.

    See Also
    --------
    is_list_like
    """
    return (
        is_list_like(obj)
        and hasattr(obj, "__len__")
        and len(obj) > 0
        and all(is_list_like(item) for item in obj)
    )


def is_dict_like(obj) -> bool:
    """
    Check if the object is dict-like.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` has dict-like properties.

    Examples
    --------
    >>> from pandas.api.types import is_dict_like
    >>> is_dict_like({1: 2})
    True
    >>> is_dict_like([1, 2, 3])
    False
    >>> is_dict_like(dict)
    False
    >>> is_dict_like(dict())
    True
    """
    dict_like_attrs = ("__getitem__", "keys", "__contains__")
    return (
        all(hasattr(obj, attr) for attr in dict_like_attrs)
        # [GH 25196] exclude classes
        and not isinstance(obj, type)
    )


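The three-attribute test above means any mapping-ish object qualifies without
inheriting from dict or collections.abc.Mapping, while classes themselves are
rejected by the `isinstance(obj, type)` guard. A short sketch (the `Environ`
class is hypothetical, chosen only to illustrate the duck typing):

from pandas.api.types import is_dict_like


class Environ:
    """Minimal mapping-ish wrapper: no dict inheritance required."""

    def __init__(self, data: dict) -> None:
        self._data = data

    def __getitem__(self, key):
        return self._data[key]

    def keys(self):
        return self._data.keys()

    def __contains__(self, key) -> bool:
        return key in self._data


print(is_dict_like(Environ({"a": 1})))  # True: all three attributes present
print(is_dict_like(Environ))            # False: classes are excluded (GH 25196)
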
def is_named_tuple(obj) -> bool:
    """
    Check if the object is a named tuple.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    bool
        Whether `obj` is a named tuple.

    Examples
    --------
    >>> from collections import namedtuple
    >>> from pandas.api.types import is_named_tuple
    >>> Point = namedtuple("Point", ["x", "y"])
    >>> p = Point(1, 2)
    >>>
    >>> is_named_tuple(p)
    True
    >>> is_named_tuple((1, 2))
    False
    """
    return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields")


def is_hashable(obj) -> TypeGuard[Hashable]:
    """
    Return True if hash(obj) will succeed, False otherwise.

    Some types will pass a test against collections.abc.Hashable but fail when
    they are actually hashed with hash().

    Distinguish between these and other types by trying the call to hash() and
    seeing if they raise TypeError.

    Returns
    -------
    bool

    Examples
    --------
    >>> import collections
    >>> from pandas.api.types import is_hashable
    >>> a = ([],)
    >>> isinstance(a, collections.abc.Hashable)
    True
    >>> is_hashable(a)
    False
    """
    # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable),
    # which can be faster than calling hash. That is because numpy scalars
    # fail this test.

    # Reconsider this decision once this numpy bug is fixed:
    # https://github.com/numpy/numpy/issues/5562

    try:
        hash(obj)
    except TypeError:
        return False
    else:
        return True


def is_sequence(obj) -> bool:
    """
    Check if the object is a sequence of objects.
    String types are not included as sequences here.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_sequence : bool
        Whether `obj` is a sequence of objects.

    Examples
    --------
    >>> l = [1, 2, 3]
    >>>
    >>> is_sequence(l)
    True
    >>> is_sequence(iter(l))
    False
    """
    try:
        iter(obj)  # Can iterate over it.
        len(obj)  # Has a length associated with it.
        return not isinstance(obj, (str, bytes))
    except (TypeError, AttributeError):
        return False


def is_dataclass(item) -> bool:
    """
    Checks if the object is a data-class instance.

    Parameters
    ----------
    item : object

    Returns
    -------
    is_dataclass : bool
        True if the item is an instance of a data-class; False if you pass
        the data-class type itself.

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int

    >>> is_dataclass(Point)
    False
    >>> is_dataclass(Point(0, 2))
    True

    """
    try:
        import dataclasses

        return dataclasses.is_dataclass(item) and not isinstance(item, type)
    except ImportError:
        return False
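
Taken together, these predicates slice "iterable" along different axes:
is_list_like accepts anything iterable that is not a string, is_sequence
additionally demands a length, and is_iterator matches only objects that are
themselves iterators. A short illustration through the public API (the
values are chosen for contrast, not taken from the pandas test suite):

from pandas.api.types import is_iterator, is_list_like

data = [1, 2, 3]
gen = (x * x for x in data)

print(is_list_like(data), is_list_like(gen))  # True True: both are iterable
print(is_iterator(data), is_iterator(gen))    # False True: only gen is an iterator
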
llmeval-env/lib/python3.10/site-packages/pandas/core/dtypes/missing.py
ADDED
@@ -0,0 +1,810 @@
"""
missing types & inference
"""
from __future__ import annotations

from decimal import Decimal
from functools import partial
from typing import (
    TYPE_CHECKING,
    overload,
)
import warnings

import numpy as np

from pandas._config import get_option

from pandas._libs import lib
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import (
    NaT,
    iNaT,
)

from pandas.core.dtypes.common import (
    DT64NS_DTYPE,
    TD64NS_DTYPE,
    ensure_object,
    is_scalar,
    is_string_or_object_np_dtype,
)
from pandas.core.dtypes.dtypes import (
    CategoricalDtype,
    DatetimeTZDtype,
    ExtensionDtype,
    IntervalDtype,
    PeriodDtype,
)
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCExtensionArray,
    ABCIndex,
    ABCMultiIndex,
    ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like

if TYPE_CHECKING:
    from re import Pattern

    from pandas._typing import (
        ArrayLike,
        DtypeObj,
        NDFrame,
        NDFrameT,
        Scalar,
        npt,
    )

    from pandas import Series
    from pandas.core.indexes.base import Index


isposinf_scalar = libmissing.isposinf_scalar
isneginf_scalar = libmissing.isneginf_scalar

nan_checker = np.isnan
INF_AS_NA = False
_dtype_object = np.dtype("object")
_dtype_str = np.dtype(str)


@overload
def isna(obj: Scalar | Pattern) -> bool:
    ...


@overload
def isna(
    obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
    ...


@overload
def isna(obj: NDFrameT) -> NDFrameT:
    ...


# handle unions
@overload
def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
    ...


@overload
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
    ...


def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
    """
    Detect missing values for an array-like object.

    This function takes a scalar or array-like object and indicates
    whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
    in object arrays, ``NaT`` in datetimelike).

    Parameters
    ----------
    obj : scalar or array-like
        Object to check for null or missing values.

    Returns
    -------
    bool or array-like of bool
        For scalar input, returns a scalar boolean.
        For array input, returns an array of boolean indicating whether each
        corresponding element is missing.

    See Also
    --------
    notna : Boolean inverse of pandas.isna.
    Series.isna : Detect missing values in a Series.
    DataFrame.isna : Detect missing values in a DataFrame.
    Index.isna : Detect missing values in an Index.

    Examples
    --------
    Scalar arguments (including strings) result in a scalar boolean.

    >>> pd.isna('dog')
    False

    >>> pd.isna(pd.NA)
    True

    >>> pd.isna(np.nan)
    True

    ndarrays result in an ndarray of booleans.

    >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
    >>> array
    array([[ 1., nan,  3.],
           [ 4.,  5., nan]])
    >>> pd.isna(array)
    array([[False,  True, False],
           [False, False,  True]])

    For indexes, an ndarray of booleans is returned.

    >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
    ...                           "2017-07-08"])
    >>> index
    DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
                  dtype='datetime64[ns]', freq=None)
    >>> pd.isna(index)
    array([False, False,  True, False])

    For Series and DataFrame, the same type is returned, containing booleans.

    >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
    >>> df
         0     1    2
    0  ant   bee  cat
    1  dog  None  fly
    >>> pd.isna(df)
           0      1      2
    0  False  False  False
    1  False   True  False

    >>> pd.isna(df[1])
    0    False
    1     True
    Name: 1, dtype: bool
    """
    return _isna(obj)


isnull = isna


def _isna(obj, inf_as_na: bool = False):
    """
    Detect missing values, treating None, NaN or NA as null. Infinite
    values will also be treated as null if inf_as_na is True.

    Parameters
    ----------
    obj : ndarray or object value
        Input array or scalar value.
    inf_as_na : bool
        Whether to treat infinity as null.

    Returns
    -------
    boolean ndarray or boolean
    """
    if is_scalar(obj):
        return libmissing.checknull(obj, inf_as_na=inf_as_na)
    elif isinstance(obj, ABCMultiIndex):
        raise NotImplementedError("isna is not defined for MultiIndex")
    elif isinstance(obj, type):
        return False
    elif isinstance(obj, (np.ndarray, ABCExtensionArray)):
        return _isna_array(obj, inf_as_na=inf_as_na)
    elif isinstance(obj, ABCIndex):
        # Try to use cached isna, which also short-circuits for integer dtypes
        # and avoids materializing RangeIndex._values
        if not obj._can_hold_na:
            return obj.isna()
        return _isna_array(obj._values, inf_as_na=inf_as_na)

    elif isinstance(obj, ABCSeries):
        result = _isna_array(obj._values, inf_as_na=inf_as_na)
        # box
        result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
        return result
    elif isinstance(obj, ABCDataFrame):
        return obj.isna()
    elif isinstance(obj, list):
        return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
    elif hasattr(obj, "__array__"):
        return _isna_array(np.asarray(obj), inf_as_na=inf_as_na)
    else:
        return False


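The dispatch above is what makes the public isna polymorphic: scalars go
through libmissing.checknull, ndarrays and extension arrays through
_isna_array, and Series results are re-boxed so the answer keeps its index.
A quick demonstration through the public entry point:

import numpy as np
import pandas as pd

print(pd.isna(None))           # True: scalar path
print(pd.isna([1.0, np.nan]))  # [False  True]: list path returns an ndarray
s = pd.Series([1.0, np.nan], index=["a", "b"])
print(pd.isna(s))              # Series path: boolean Series, index preserved
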
def _use_inf_as_na(key) -> None:
    """
    Option change callback for na/inf behaviour.

    Choose which replacement for numpy.isnan / -numpy.isfinite is used.

    Parameters
    ----------
    key : str
        Option key, looked up via ``get_option``. A value of True means
        treat None, NaN, INF, -INF as null (old way); False means None and
        NaN are null, but INF, -INF are not null (new way).

    Notes
    -----
    This approach to setting global module values is discussed and
    approved here:

    * https://stackoverflow.com/questions/4859217/
      programmatically-creating-variables-in-python/4859312#4859312
    """
    inf_as_na = get_option(key)
    globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)
    if inf_as_na:
        globals()["nan_checker"] = lambda x: ~np.isfinite(x)
        globals()["INF_AS_NA"] = True
    else:
        globals()["nan_checker"] = np.isnan
        globals()["INF_AS_NA"] = False


def _isna_array(values: ArrayLike, inf_as_na: bool = False):
    """
    Return an array indicating which values of the input array are NaN / NA.

    Parameters
    ----------
    values : ndarray or ExtensionArray
        The input array whose elements are to be checked.
    inf_as_na : bool
        Whether or not to treat infinite values as NA.

    Returns
    -------
    array-like
        Array of boolean values denoting the NA status of each element.
    """
    dtype = values.dtype

    if not isinstance(values, np.ndarray):
        # i.e. ExtensionArray
        if inf_as_na and isinstance(dtype, CategoricalDtype):
            result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na)
        else:
            # error: Incompatible types in assignment (expression has type
            # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has
            # type "ndarray[Any, dtype[bool_]]")
            result = values.isna()  # type: ignore[assignment]
    elif isinstance(values, np.rec.recarray):
        # GH 48526
        result = _isna_recarray_dtype(values, inf_as_na=inf_as_na)
    elif is_string_or_object_np_dtype(values.dtype):
        result = _isna_string_dtype(values, inf_as_na=inf_as_na)
    elif dtype.kind in "mM":
        # this is the NaT pattern
        result = values.view("i8") == iNaT
    else:
        if inf_as_na:
            result = ~np.isfinite(values)
        else:
            result = np.isnan(values)

    return result


def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]:
    # Working around NumPy ticket 1542
    dtype = values.dtype

    if dtype.kind in ("S", "U"):
        result = np.zeros(values.shape, dtype=bool)
    else:
        if values.ndim in {1, 2}:
            result = libmissing.isnaobj(values, inf_as_na=inf_as_na)
        else:
            # 0-D, reached via e.g. mask_missing
            result = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na)
            result = result.reshape(values.shape)

    return result


def _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_:
    is_inf_in_record = np.zeros(len(record_as_array), dtype=bool)
    for i, value in enumerate(record_as_array):
        is_element_inf = False
        try:
            is_element_inf = np.isinf(value)
        except TypeError:
            is_element_inf = False
        is_inf_in_record[i] = is_element_inf

    return np.any(is_inf_in_record)


def _isna_recarray_dtype(
    values: np.rec.recarray, inf_as_na: bool
) -> npt.NDArray[np.bool_]:
    result = np.zeros(values.shape, dtype=bool)
    for i, record in enumerate(values):
        record_as_array = np.array(record.tolist())
        does_record_contain_nan = isna_all(record_as_array)
        does_record_contain_inf = False
        if inf_as_na:
            does_record_contain_inf = bool(_has_record_inf_value(record_as_array))
        result[i] = np.any(
            np.logical_or(does_record_contain_nan, does_record_contain_inf)
        )

    return result


@overload
def notna(obj: Scalar) -> bool:
    ...


@overload
def notna(
    obj: ArrayLike | Index | list,
) -> npt.NDArray[np.bool_]:
    ...


@overload
def notna(obj: NDFrameT) -> NDFrameT:
    ...


# handle unions
@overload
def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
    ...


@overload
def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
    ...


def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
    """
    Detect non-missing values for an array-like object.

    This function takes a scalar or array-like object and indicates
    whether values are valid (not missing, which is ``NaN`` in numeric
    arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).

    Parameters
    ----------
    obj : array-like or object value
        Object to check for *not* null or *non*-missing values.

    Returns
    -------
    bool or array-like of bool
        For scalar input, returns a scalar boolean.
        For array input, returns an array of boolean indicating whether each
        corresponding element is valid.

    See Also
    --------
    isna : Boolean inverse of pandas.notna.
    Series.notna : Detect valid values in a Series.
    DataFrame.notna : Detect valid values in a DataFrame.
    Index.notna : Detect valid values in an Index.

    Examples
    --------
    Scalar arguments (including strings) result in a scalar boolean.

    >>> pd.notna('dog')
    True

    >>> pd.notna(pd.NA)
    False

    >>> pd.notna(np.nan)
    False

    ndarrays result in an ndarray of booleans.

    >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
    >>> array
    array([[ 1., nan,  3.],
           [ 4.,  5., nan]])
    >>> pd.notna(array)
    array([[ True, False,  True],
           [ True,  True, False]])

    For indexes, an ndarray of booleans is returned.

    >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
    ...                           "2017-07-08"])
    >>> index
    DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
                  dtype='datetime64[ns]', freq=None)
    >>> pd.notna(index)
    array([ True,  True, False,  True])

    For Series and DataFrame, the same type is returned, containing booleans.

    >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
    >>> df
         0     1    2
    0  ant   bee  cat
    1  dog  None  fly
    >>> pd.notna(df)
          0      1     2
    0  True   True  True
    1  True  False  True

    >>> pd.notna(df[1])
    0     True
    1    False
    Name: 1, dtype: bool
    """
    res = isna(obj)
    if isinstance(res, bool):
        return not res
    return ~res


notnull = notna


def array_equivalent(
    left,
    right,
    strict_nan: bool = False,
    dtype_equal: bool = False,
) -> bool:
    """
    True if two arrays, left and right, have equal non-NaN elements, and NaNs
    in corresponding locations. False otherwise. It is assumed that left and
    right are NumPy arrays of the same dtype. The behavior of this function
    (particularly with respect to NaNs) is not defined if the dtypes are
    different.

    Parameters
    ----------
    left, right : ndarrays
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    dtype_equal : bool, default False
        Whether `left` and `right` are known to have the same dtype
        according to `is_dtype_equal`. Some methods, like `BlockManager.equals`,
        require that the dtypes match. Setting this to ``True`` can improve
        performance, but will give different results for arrays that are
        equal but have different dtypes.

    Returns
    -------
    b : bool
        Returns True if the arrays are equivalent.

    Examples
    --------
    >>> array_equivalent(
    ...     np.array([1, 2, np.nan]),
    ...     np.array([1, 2, np.nan]))
    True
    >>> array_equivalent(
    ...     np.array([1, np.nan, 2]),
    ...     np.array([1, 2, np.nan]))
    False
    """
    left, right = np.asarray(left), np.asarray(right)

    # shape compat
    if left.shape != right.shape:
        return False

    if dtype_equal:
        # fastpath when we require that the dtypes match (Block.equals)
        if left.dtype.kind in "fc":
            return _array_equivalent_float(left, right)
        elif left.dtype.kind in "mM":
            return _array_equivalent_datetimelike(left, right)
        elif is_string_or_object_np_dtype(left.dtype):
            # TODO: fastpath for pandas' StringDtype
            return _array_equivalent_object(left, right, strict_nan)
        else:
            return np.array_equal(left, right)

    # Slow path when we allow comparing different dtypes.
    # Object arrays can contain None, NaN and NaT.
    # string dtypes must come to this path for NumPy 1.7.1 compat
    if left.dtype.kind in "OSU" or right.dtype.kind in "OSU":
        # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]`
        # or `in ("O", "S", "U")`
        return _array_equivalent_object(left, right, strict_nan)

    # NaNs can occur in float and complex arrays.
    if left.dtype.kind in "fc":
        if not (left.size and right.size):
            return True
        return ((left == right) | (isna(left) & isna(right))).all()

    elif left.dtype.kind in "mM" or right.dtype.kind in "mM":
        # datetime64, timedelta64, Period
        if left.dtype != right.dtype:
            return False

        left = left.view("i8")
        right = right.view("i8")

    # if we have structured dtypes, compare first
    if (
        left.dtype.type is np.void or right.dtype.type is np.void
    ) and left.dtype != right.dtype:
        return False

    return np.array_equal(left, right)


def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool:
    return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all())


def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray):
    return np.array_equal(left.view("i8"), right.view("i8"))


def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool):
    left = ensure_object(left)
    right = ensure_object(right)

    mask: npt.NDArray[np.bool_] | None = None
    if strict_nan:
        mask = isna(left) & isna(right)
        if not mask.any():
            mask = None

    try:
        if mask is None:
            return lib.array_equivalent_object(left, right)
        if not lib.array_equivalent_object(left[~mask], right[~mask]):
            return False
        left_remaining = left[mask]
        right_remaining = right[mask]
    except ValueError:
        # can raise a ValueError if left and right cannot be
        # compared (e.g. nested arrays)
        left_remaining = left
        right_remaining = right

    for left_value, right_value in zip(left_remaining, right_remaining):
        if left_value is NaT and right_value is not NaT:
            return False

        elif left_value is libmissing.NA and right_value is not libmissing.NA:
            return False

        elif isinstance(left_value, float) and np.isnan(left_value):
            if not isinstance(right_value, float) or not np.isnan(right_value):
                return False
        else:
            with warnings.catch_warnings():
                # suppress numpy's "elementwise comparison failed"
                warnings.simplefilter("ignore", DeprecationWarning)
                try:
                    if np.any(np.asarray(left_value != right_value)):
                        return False
                except TypeError as err:
                    if "boolean value of NA is ambiguous" in str(err):
                        return False
                    raise
                except ValueError:
                    # numpy can raise a ValueError if left and right cannot be
                    # compared (e.g. nested arrays)
                    return False
    return True


def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
    """
    ExtensionArray-compatible implementation of array_equivalent.
    """
    if left.dtype != right.dtype:
        return False
    elif isinstance(left, ABCExtensionArray):
        return left.equals(right)
    else:
        return array_equivalent(left, right, dtype_equal=True)


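The point of array_equivalent, versus plain np.array_equal, is that NaNs in
matching positions count as equal, and (unless strict_nan=True) None and NaN
are interchangeable in object arrays. A quick comparison, importing from the
internal module shown here:

import numpy as np
from pandas.core.dtypes.missing import array_equivalent

a = np.array([1.0, np.nan, 3.0])
b = np.array([1.0, np.nan, 3.0])

print(np.array_equal(a, b))    # False: NaN != NaN element-wise
print(array_equivalent(a, b))  # True: co-located NaNs are treated as equal

obj_a = np.array([1, None], dtype=object)
obj_b = np.array([1, np.nan], dtype=object)
print(array_equivalent(obj_a, obj_b))                   # True: None ~ NaN
print(array_equivalent(obj_a, obj_b, strict_nan=True))  # False: distinguished
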
def infer_fill_value(val):
    """
    Infer the nan/NaT fill value from the provided scalar/ndarray/list-like;
    if it is a NaT, return the correctly-dtyped element so that blocks are
    constructed properly.
    """
    if not is_list_like(val):
        val = [val]
    val = np.asarray(val)
    if val.dtype.kind in "mM":
        return np.array("NaT", dtype=val.dtype)
    elif val.dtype == object:
        dtype = lib.infer_dtype(ensure_object(val), skipna=False)
        if dtype in ["datetime", "datetime64"]:
            return np.array("NaT", dtype=DT64NS_DTYPE)
        elif dtype in ["timedelta", "timedelta64"]:
            return np.array("NaT", dtype=TD64NS_DTYPE)
        return np.array(np.nan, dtype=object)
    elif val.dtype.kind == "U":
        return np.array(np.nan, dtype=val.dtype)
    return np.nan


def construct_1d_array_from_inferred_fill_value(
    value: object, length: int
) -> ArrayLike:
    # Find our empty_value dtype by constructing an array
    # from our value and doing a .take on it
    from pandas.core.algorithms import take_nd
    from pandas.core.construction import sanitize_array
    from pandas.core.indexes.base import Index

    arr = sanitize_array(value, Index(range(1)), copy=False)
    taker = -1 * np.ones(length, dtype=np.intp)
    return take_nd(arr, taker)


def maybe_fill(arr: np.ndarray) -> np.ndarray:
    """
    Fill numpy.ndarray with NaN, unless we have an integer or boolean dtype.
    """
    if arr.dtype.kind not in "iub":
        arr.fill(np.nan)
    return arr


def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
    """
    Return a dtype-compatible na value.

    Parameters
    ----------
    dtype : string / dtype
    compat : bool, default True

    Returns
    -------
    np.dtype or a pandas dtype

    Examples
    --------
    >>> na_value_for_dtype(np.dtype('int64'))
    0
    >>> na_value_for_dtype(np.dtype('int64'), compat=False)
    nan
    >>> na_value_for_dtype(np.dtype('float64'))
    nan
    >>> na_value_for_dtype(np.dtype('bool'))
    False
    >>> na_value_for_dtype(np.dtype('datetime64[ns]'))
    numpy.datetime64('NaT')
    """

    if isinstance(dtype, ExtensionDtype):
        return dtype.na_value
    elif dtype.kind in "mM":
        unit = np.datetime_data(dtype)[0]
        return dtype.type("NaT", unit)
    elif dtype.kind == "f":
        return np.nan
    elif dtype.kind in "iu":
        if compat:
            return 0
        return np.nan
    elif dtype.kind == "b":
        if compat:
            return False
        return np.nan
    return np.nan


def remove_na_arraylike(arr: Series | Index | np.ndarray):
    """
    Return array-like containing only true/non-NaN values, possibly empty.
    """
    if isinstance(arr.dtype, ExtensionDtype):
        return arr[notna(arr)]
    else:
        return arr[notna(np.asarray(arr))]


def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
    """
    isna check that excludes incompatible dtypes

    Parameters
    ----------
    obj : object
    dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype

    Returns
    -------
    bool
    """
    if not lib.is_scalar(obj) or not isna(obj):
        return False
    elif dtype.kind == "M":
        if isinstance(dtype, np.dtype):
            # i.e. not tzaware
            return not isinstance(obj, (np.timedelta64, Decimal))
        # we have to rule out tznaive dt64("NaT")
        return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal))
    elif dtype.kind == "m":
        return not isinstance(obj, (np.datetime64, Decimal))
    elif dtype.kind in "iufc":
        # Numeric
        return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))
    elif dtype.kind == "b":
        # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype)
        return lib.is_float(obj) or obj is None or obj is libmissing.NA

    elif dtype == _dtype_str:
        # numpy string dtypes to avoid float np.nan
        return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float))

    elif dtype == _dtype_object:
        # This is needed for Categorical, but is kind of weird
        return True

    elif isinstance(dtype, PeriodDtype):
        return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))

    elif isinstance(dtype, IntervalDtype):
        return lib.is_float(obj) or obj is None or obj is libmissing.NA

    elif isinstance(dtype, CategoricalDtype):
        return is_valid_na_for_dtype(obj, dtype.categories.dtype)

    # fallback, default to allowing NaN, None, NA, NaT
    return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))


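In other words, an NA scalar only "fits" a dtype when it can round-trip into
that dtype's storage: NaT belongs to datetime/timedelta-like dtypes, while
float NaN, None, and pd.NA belong to numeric and masked dtypes. A small
check through the internal helper shown above (internal API, so treat this
as a sketch rather than a stable interface):

import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import is_valid_na_for_dtype

print(is_valid_na_for_dtype(np.nan, np.dtype("float64")))         # True
print(is_valid_na_for_dtype(pd.NaT, np.dtype("float64")))         # False: NaT is not numeric-NA
print(is_valid_na_for_dtype(pd.NaT, np.dtype("datetime64[ns]")))  # True
print(is_valid_na_for_dtype(np.timedelta64("NaT"), np.dtype("datetime64[ns]")))  # False
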
def isna_all(arr: ArrayLike) -> bool:
    """
    Optimized equivalent to isna(arr).all()
    """
    total_len = len(arr)

    # Usually it's enough to check only a small fraction of values to see if
    # a block is NOT null, so chunking should help in such cases.
    # parameters 1000 and 40 were chosen arbitrarily
    chunk_len = max(total_len // 40, 1000)

    dtype = arr.dtype
    if lib.is_np_dtype(dtype, "f"):
        checker = nan_checker

    elif (lib.is_np_dtype(dtype, "mM")) or isinstance(
        dtype, (DatetimeTZDtype, PeriodDtype)
    ):
        # error: Incompatible types in assignment (expression has type
        # "Callable[[Any], Any]", variable has type "ufunc")
        checker = lambda x: np.asarray(x.view("i8")) == iNaT  # type: ignore[assignment]

    else:
        # error: Incompatible types in assignment (expression has type "Callable[[Any],
        # Any]", variable has type "ufunc")
        checker = lambda x: _isna_array(  # type: ignore[assignment]
            x, inf_as_na=INF_AS_NA
        )

    return all(
        checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)
    )
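
The chunked scan matters because `all(...)` over the generator stops at the
first chunk containing a non-NA value, so a mostly-non-null array is rejected
after inspecting roughly one chunk instead of materializing a full boolean
mask. A self-contained sketch of the same idea in plain NumPy (the function
name is illustrative, not pandas API):

import numpy as np


def isnan_all_chunked(arr: np.ndarray, chunk_len: int = 1000) -> bool:
    """Return True iff every element is NaN, short-circuiting per chunk."""
    return all(
        np.isnan(arr[i : i + chunk_len]).all()
        for i in range(0, len(arr), chunk_len)
    )


data = np.full(1_000_000, np.nan)
data[5] = 1.0  # the very first chunk already disproves "all NaN"
print(isnan_all_chunked(data))  # False, after scanning ~one chunk
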
llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (196 Bytes).
llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc
ADDED
Binary file (3.62 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc
ADDED
Binary file (12.1 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc
ADDED
Binary file (4.69 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc
ADDED
Binary file (17.9 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc
ADDED
Binary file (12.2 kB).