applied-ai-018 committed
Commit 13336c2 · verified · 1 parent: 70e71e5

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/executor.py +239 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/extensions.py +584 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py +27 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/__init__.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/mean_.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/min_max_.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/shared.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/sum_.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/var_.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py +196 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py +125 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py +29 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py +244 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py +245 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py +1929 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py +0 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/api.py +85 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/astype.py +301 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/base.py +583 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/cast.py +1973 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/common.py +1748 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/concat.py +348 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py +2348 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/generic.py +147 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/inference.py +437 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/missing.py +810 -0
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc ADDED
Binary file (5.1 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc ADDED
Binary file (15.9 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/executor.py ADDED
@@ -0,0 +1,239 @@

from __future__ import annotations

import functools
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
)

if TYPE_CHECKING:
    from pandas._typing import Scalar

import numpy as np

from pandas.compat._optional import import_optional_dependency


@functools.cache
def generate_apply_looper(func, nopython=True, nogil=True, parallel=False):
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")
    nb_compat_func = numba.extending.register_jitable(func)

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def nb_looper(values, axis):
        # Operate on the first row/col in order to get
        # the output shape
        if axis == 0:
            first_elem = values[:, 0]
            dim0 = values.shape[1]
        else:
            first_elem = values[0]
            dim0 = values.shape[0]
        res0 = nb_compat_func(first_elem)
        # Use np.asarray to get shape for
        # https://github.com/numba/numba/issues/4202#issuecomment-1185981507
        buf_shape = (dim0,) + np.atleast_1d(np.asarray(res0)).shape
        if axis == 0:
            buf_shape = buf_shape[::-1]
        buff = np.empty(buf_shape)

        if axis == 1:
            buff[0] = res0
            for i in numba.prange(1, values.shape[0]):
                buff[i] = nb_compat_func(values[i])
        else:
            buff[:, 0] = res0
            for j in numba.prange(1, values.shape[1]):
                buff[:, j] = nb_compat_func(values[:, j])
        return buff

    return nb_looper


@functools.cache
def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel):
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    if is_grouped_kernel:

        @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def column_looper(
            values: np.ndarray,
            labels: np.ndarray,
            ngroups: int,
            min_periods: int,
            *args,
        ):
            result = np.empty((values.shape[0], ngroups), dtype=result_dtype)
            na_positions = {}
            for i in numba.prange(values.shape[0]):
                output, na_pos = func(
                    values[i], result_dtype, labels, ngroups, min_periods, *args
                )
                result[i] = output
                if len(na_pos) > 0:
                    na_positions[i] = np.array(na_pos)
            return result, na_positions

    else:

        @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def column_looper(
            values: np.ndarray,
            start: np.ndarray,
            end: np.ndarray,
            min_periods: int,
            *args,
        ):
            result = np.empty((values.shape[0], len(start)), dtype=result_dtype)
            na_positions = {}
            for i in numba.prange(values.shape[0]):
                output, na_pos = func(
                    values[i], result_dtype, start, end, min_periods, *args
                )
                result[i] = output
                if len(na_pos) > 0:
                    na_positions[i] = np.array(na_pos)
            return result, na_positions

    return column_looper


default_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.int64,
    np.dtype("int16"): np.int64,
    np.dtype("int32"): np.int64,
    np.dtype("int64"): np.int64,
    np.dtype("uint8"): np.uint64,
    np.dtype("uint16"): np.uint64,
    np.dtype("uint32"): np.uint64,
    np.dtype("uint64"): np.uint64,
    np.dtype("float32"): np.float64,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.complex128,
    np.dtype("complex128"): np.complex128,
}


# TODO: Preserve complex dtypes

float_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.float64,
    np.dtype("int16"): np.float64,
    np.dtype("int32"): np.float64,
    np.dtype("int64"): np.float64,
    np.dtype("uint8"): np.float64,
    np.dtype("uint16"): np.float64,
    np.dtype("uint32"): np.float64,
    np.dtype("uint64"): np.float64,
    np.dtype("float32"): np.float64,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.float64,
    np.dtype("complex128"): np.float64,
}

identity_dtype_mapping: dict[np.dtype, Any] = {
    np.dtype("int8"): np.int8,
    np.dtype("int16"): np.int16,
    np.dtype("int32"): np.int32,
    np.dtype("int64"): np.int64,
    np.dtype("uint8"): np.uint8,
    np.dtype("uint16"): np.uint16,
    np.dtype("uint32"): np.uint32,
    np.dtype("uint64"): np.uint64,
    np.dtype("float32"): np.float32,
    np.dtype("float64"): np.float64,
    np.dtype("complex64"): np.complex64,
    np.dtype("complex128"): np.complex128,
}


def generate_shared_aggregator(
    func: Callable[..., Scalar],
    dtype_mapping: dict[np.dtype, np.dtype],
    is_grouped_kernel: bool,
    nopython: bool,
    nogil: bool,
    parallel: bool,
):
    """
    Generate a Numba function that loops over the columns 2D object and applies
    a 1D numba kernel over each column.

    Parameters
    ----------
    func : function
        aggregation function to be applied to each column
    dtype_mapping: dict or None
        If not None, maps a dtype to a result dtype.
        Otherwise, will fall back to default mapping.
    is_grouped_kernel: bool, default False
        Whether func operates using the group labels (True)
        or using starts/ends arrays

        If true, you also need to pass the number of groups to this function
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """

    # A wrapper around the looper function,
    # to dispatch based on dtype since numba is unable to do that in nopython mode

    # It also post-processes the values by inserting nans where number of observations
    # is less than min_periods
    # Cannot do this in numba nopython mode
    # (you'll run into type-unification error when you cast int -> float)
    def looper_wrapper(
        values,
        start=None,
        end=None,
        labels=None,
        ngroups=None,
        min_periods: int = 0,
        **kwargs,
    ):
        result_dtype = dtype_mapping[values.dtype]
        column_looper = make_looper(
            func, result_dtype, is_grouped_kernel, nopython, nogil, parallel
        )
        # Need to unpack kwargs since numba only supports *args
        if is_grouped_kernel:
            result, na_positions = column_looper(
                values, labels, ngroups, min_periods, *kwargs.values()
            )
        else:
            result, na_positions = column_looper(
                values, start, end, min_periods, *kwargs.values()
            )
        if result.dtype.kind == "i":
            # Look if na_positions is not empty
            # If so, convert the whole block
            # This is OK since int dtype cannot hold nan,
            # so if min_periods not satisfied for 1 col, it is not satisfied for
            # all columns at that index
            for na_pos in na_positions.values():
                if len(na_pos) > 0:
                    result = result.astype("float64")
                    break
        # TODO: Optimize this
        for i, na_pos in na_positions.items():
            if len(na_pos) > 0:
                result[i, na_pos] = np.nan
        return result

    return looper_wrapper
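
For orientation (not part of the committed file), a minimal sketch of how generate_shared_aggregator wires a 1D kernel over a 2D block; the data and window bounds below are made-up, and numba must be installed:

import numpy as np
from pandas.core._numba.executor import (
    float_dtype_mapping,
    generate_shared_aggregator,
)
from pandas.core._numba.kernels import sliding_mean

# Two "columns" of five observations each; the looper iterates over axis 0
values = np.arange(10.0).reshape(2, 5)
start = np.array([0, 0, 0, 1, 2], dtype=np.int64)  # window starts
end = np.array([1, 2, 3, 4, 5], dtype=np.int64)  # window ends (exclusive)
agg = generate_shared_aggregator(
    sliding_mean,
    float_dtype_mapping,
    is_grouped_kernel=False,
    nopython=True,
    nogil=True,
    parallel=False,
)
result = agg(values, start=start, end=end, min_periods=1)  # shape (2, 5)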
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/extensions.py ADDED
@@ -0,0 +1,584 @@

# Disable type checking for this module since numba's internals
# are not typed, and we use numba's internals via its extension API
# mypy: ignore-errors
"""
Utility classes/functions to let numba recognize
pandas Index/Series/DataFrame

Mostly vendored from https://github.com/numba/numba/blob/main/numba/tests/pdlike_usecase.py
"""

from __future__ import annotations

from contextlib import contextmanager
import operator

import numba
from numba import types
from numba.core import cgutils
from numba.core.datamodel import models
from numba.core.extending import (
    NativeValue,
    box,
    lower_builtin,
    make_attribute_wrapper,
    overload,
    overload_attribute,
    overload_method,
    register_model,
    type_callable,
    typeof_impl,
    unbox,
)
from numba.core.imputils import impl_ret_borrowed
import numpy as np

from pandas._libs import lib

from pandas.core.indexes.base import Index
from pandas.core.indexing import _iLocIndexer
from pandas.core.internals import SingleBlockManager
from pandas.core.series import Series


# Helper function to hack around fact that Index casts numpy string dtype to object
#
# Idea is to set an attribute on a Index called _numba_data
# that is the original data, or the object data casted to numpy string dtype,
# with a context manager that is unset afterwards
@contextmanager
def set_numba_data(index: Index):
    numba_data = index._data
    if numba_data.dtype == object:
        if not lib.is_string_array(numba_data):
            raise ValueError(
                "The numba engine only supports using string or numeric column names"
            )
        numba_data = numba_data.astype("U")
    try:
        index._numba_data = numba_data
        yield index
    finally:
        del index._numba_data


# TODO: Range index support
# (this currently lowers OK, but does not round-trip)
class IndexType(types.Type):
    """
    The type class for Index objects.
    """

    def __init__(self, dtype, layout, pyclass: any) -> None:
        self.pyclass = pyclass
        name = f"index({dtype}, {layout})"
        self.dtype = dtype
        self.layout = layout
        super().__init__(name)

    @property
    def key(self):
        return self.pyclass, self.dtype, self.layout

    @property
    def as_array(self):
        return types.Array(self.dtype, 1, self.layout)

    def copy(self, dtype=None, ndim: int = 1, layout=None):
        assert ndim == 1
        if dtype is None:
            dtype = self.dtype
        layout = layout or self.layout
        return type(self)(dtype, layout, self.pyclass)


class SeriesType(types.Type):
    """
    The type class for Series objects.
    """

    def __init__(self, dtype, index, namety) -> None:
        assert isinstance(index, IndexType)
        self.dtype = dtype
        self.index = index
        self.values = types.Array(self.dtype, 1, "C")
        self.namety = namety
        name = f"series({dtype}, {index}, {namety})"
        super().__init__(name)

    @property
    def key(self):
        return self.dtype, self.index, self.namety

    @property
    def as_array(self):
        return self.values

    def copy(self, dtype=None, ndim: int = 1, layout: str = "C"):
        assert ndim == 1
        assert layout == "C"
        if dtype is None:
            dtype = self.dtype
        return type(self)(dtype, self.index, self.namety)


@typeof_impl.register(Index)
def typeof_index(val, c):
    """
    This will assume that only strings are in object dtype
    index.
    (you should check this before this gets lowered down to numba)
    """
    # arrty = typeof_impl(val._data, c)
    arrty = typeof_impl(val._numba_data, c)
    assert arrty.ndim == 1
    return IndexType(arrty.dtype, arrty.layout, type(val))


@typeof_impl.register(Series)
def typeof_series(val, c):
    index = typeof_impl(val.index, c)
    arrty = typeof_impl(val.values, c)
    namety = typeof_impl(val.name, c)
    assert arrty.ndim == 1
    assert arrty.layout == "C"
    return SeriesType(arrty.dtype, index, namety)


@type_callable(Series)
def type_series_constructor(context):
    def typer(data, index, name=None):
        if isinstance(index, IndexType) and isinstance(data, types.Array):
            assert data.ndim == 1
            if name is None:
                name = types.intp
            return SeriesType(data.dtype, index, name)

    return typer


@type_callable(Index)
def type_index_constructor(context):
    def typer(data, hashmap=None):
        if isinstance(data, types.Array):
            assert data.layout == "C"
            assert data.ndim == 1
            assert hashmap is None or isinstance(hashmap, types.DictType)
            return IndexType(data.dtype, layout=data.layout, pyclass=Index)

    return typer


# Backend extensions for Index and Series and Frame
@register_model(IndexType)
class IndexModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        # We don't want the numpy string scalar type in our hashmap
        members = [
            ("data", fe_type.as_array),
            # This is an attempt to emulate our hashtable code with a numba
            # typed dict
            # It maps from values in the index to their integer positions in the array
            ("hashmap", types.DictType(fe_type.dtype, types.intp)),
            # Pointer to the Index object this was created from, or that it
            # boxes to
            # https://numba.discourse.group/t/qst-how-to-cache-the-boxing-of-an-object/2128/2?u=lithomas1
            ("parent", types.pyobject),
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)


@register_model(SeriesType)
class SeriesModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        members = [
            ("index", fe_type.index),
            ("values", fe_type.as_array),
            ("name", fe_type.namety),
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)


make_attribute_wrapper(IndexType, "data", "_data")
make_attribute_wrapper(IndexType, "hashmap", "hashmap")

make_attribute_wrapper(SeriesType, "index", "index")
make_attribute_wrapper(SeriesType, "values", "values")
make_attribute_wrapper(SeriesType, "name", "name")


@lower_builtin(Series, types.Array, IndexType)
def pdseries_constructor(context, builder, sig, args):
    data, index = args
    series = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    series.index = index
    series.values = data
    series.name = context.get_constant(types.intp, 0)
    return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue())


@lower_builtin(Series, types.Array, IndexType, types.intp)
@lower_builtin(Series, types.Array, IndexType, types.float64)
@lower_builtin(Series, types.Array, IndexType, types.unicode_type)
def pdseries_constructor_with_name(context, builder, sig, args):
    data, index, name = args
    series = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    series.index = index
    series.values = data
    series.name = name
    return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue())


@lower_builtin(Index, types.Array, types.DictType, types.pyobject)
def index_constructor_2arg(context, builder, sig, args):
    (data, hashmap, parent) = args
    index = cgutils.create_struct_proxy(sig.return_type)(context, builder)

    index.data = data
    index.hashmap = hashmap
    index.parent = parent
    return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue())


@lower_builtin(Index, types.Array, types.DictType)
def index_constructor_2arg_parent(context, builder, sig, args):
    # Basically same as index_constructor_1arg, but also lets you specify the
    # parent object
    (data, hashmap) = args
    index = cgutils.create_struct_proxy(sig.return_type)(context, builder)

    index.data = data
    index.hashmap = hashmap
    return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue())


@lower_builtin(Index, types.Array)
def index_constructor_1arg(context, builder, sig, args):
    from numba.typed import Dict

    key_type = sig.return_type.dtype
    value_type = types.intp

    def index_impl(data):
        return Index(data, Dict.empty(key_type, value_type))

    return context.compile_internal(builder, index_impl, sig, args)


# Helper to convert the unicodecharseq (numpy string scalar) into a unicode_type
# (regular string)
def maybe_cast_str(x):
    # Dummy function that numba can overload
    pass


@overload(maybe_cast_str)
def maybe_cast_str_impl(x):
    """Converts numba UnicodeCharSeq (numpy string scalar) -> unicode type (string).
    Is a no-op for other types."""
    if isinstance(x, types.UnicodeCharSeq):
        return lambda x: str(x)
    else:
        return lambda x: x


@unbox(IndexType)
def unbox_index(typ, obj, c):
    """
    Convert a Index object to a native structure.

    Note: Object dtype is not allowed here
    """
    data_obj = c.pyapi.object_getattr_string(obj, "_numba_data")
    index = cgutils.create_struct_proxy(typ)(c.context, c.builder)
    # If we see an object array, assume it's been validated as only containing strings
    # We still need to do the conversion though
    index.data = c.unbox(typ.as_array, data_obj).value
    typed_dict_obj = c.pyapi.unserialize(c.pyapi.serialize_object(numba.typed.Dict))
    # Create an empty typed dict in numba for the hashmap for indexing
    # equiv of numba.typed.Dict.empty(typ.dtype, types.intp)
    arr_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.dtype))
    intp_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(types.intp))
    hashmap_obj = c.pyapi.call_method(
        typed_dict_obj, "empty", (arr_type_obj, intp_type_obj)
    )
    index.hashmap = c.unbox(types.DictType(typ.dtype, types.intp), hashmap_obj).value
    # Set the parent for speedy boxing.
    index.parent = obj

    # Decrefs
    c.pyapi.decref(data_obj)
    c.pyapi.decref(arr_type_obj)
    c.pyapi.decref(intp_type_obj)
    c.pyapi.decref(typed_dict_obj)

    return NativeValue(index._getvalue())


@unbox(SeriesType)
def unbox_series(typ, obj, c):
    """
    Convert a Series object to a native structure.
    """
    index_obj = c.pyapi.object_getattr_string(obj, "index")
    values_obj = c.pyapi.object_getattr_string(obj, "values")
    name_obj = c.pyapi.object_getattr_string(obj, "name")

    series = cgutils.create_struct_proxy(typ)(c.context, c.builder)
    series.index = c.unbox(typ.index, index_obj).value
    series.values = c.unbox(typ.values, values_obj).value
    series.name = c.unbox(typ.namety, name_obj).value

    # Decrefs
    c.pyapi.decref(index_obj)
    c.pyapi.decref(values_obj)
    c.pyapi.decref(name_obj)

    return NativeValue(series._getvalue())


@box(IndexType)
def box_index(typ, val, c):
    """
    Convert a native index structure to a Index object.

    If our native index is of a numpy string dtype, we'll cast it to
    object.
    """
    # First build a Numpy array object, then wrap it in a Index
    index = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)

    res = cgutils.alloca_once_value(c.builder, index.parent)

    # Does parent exist?
    # (it means already boxed once, or Index same as original df.index or df.columns)
    # xref https://github.com/numba/numba/blob/596e8a55334cc46854e3192766e643767bd7c934/numba/core/boxing.py#L593C17-L593C17
    with c.builder.if_else(cgutils.is_not_null(c.builder, index.parent)) as (
        has_parent,
        otherwise,
    ):
        with has_parent:
            c.pyapi.incref(index.parent)
        with otherwise:
            # TODO: preserve the original class for the index
            # Also need preserve the name of the Index
            # class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.pyclass))
            class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Index))
            array_obj = c.box(typ.as_array, index.data)
            if isinstance(typ.dtype, types.UnicodeCharSeq):
                # We converted to numpy string dtype, convert back
                # to object since _simple_new won't do that for us
                object_str_obj = c.pyapi.unserialize(c.pyapi.serialize_object("object"))
                array_obj = c.pyapi.call_method(array_obj, "astype", (object_str_obj,))
                c.pyapi.decref(object_str_obj)
            # this is basically Index._simple_new(array_obj, name_obj) in python
            index_obj = c.pyapi.call_method(class_obj, "_simple_new", (array_obj,))
            index.parent = index_obj
            c.builder.store(index_obj, res)

            # Decrefs
            c.pyapi.decref(class_obj)
            c.pyapi.decref(array_obj)
    return c.builder.load(res)


@box(SeriesType)
def box_series(typ, val, c):
    """
    Convert a native series structure to a Series object.
    """
    series = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)
    series_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Series._from_mgr))
    mgr_const_obj = c.pyapi.unserialize(
        c.pyapi.serialize_object(SingleBlockManager.from_array)
    )
    index_obj = c.box(typ.index, series.index)
    array_obj = c.box(typ.as_array, series.values)
    name_obj = c.box(typ.namety, series.name)
    # This is basically equivalent of
    # pd.Series(data=array_obj, index=index_obj)
    # To improve perf, we will construct the Series from a manager
    # object to avoid checks.
    # We'll also set the name attribute manually to avoid validation
    mgr_obj = c.pyapi.call_function_objargs(
        mgr_const_obj,
        (
            array_obj,
            index_obj,
        ),
    )
    mgr_axes_obj = c.pyapi.object_getattr_string(mgr_obj, "axes")
    # Series._constructor_from_mgr(mgr, axes)
    series_obj = c.pyapi.call_function_objargs(
        series_const_obj, (mgr_obj, mgr_axes_obj)
    )
    c.pyapi.object_setattr_string(series_obj, "_name", name_obj)

    # Decrefs
    c.pyapi.decref(series_const_obj)
    c.pyapi.decref(mgr_axes_obj)
    c.pyapi.decref(mgr_obj)
    c.pyapi.decref(mgr_const_obj)
    c.pyapi.decref(index_obj)
    c.pyapi.decref(array_obj)
    c.pyapi.decref(name_obj)

    return series_obj


# Add common series reductions (e.g. mean, sum),
# and also add common binops (e.g. add, sub, mul, div)
def generate_series_reduction(ser_reduction, ser_method):
    @overload_method(SeriesType, ser_reduction)
    def series_reduction(series):
        def series_reduction_impl(series):
            return ser_method(series.values)

        return series_reduction_impl

    return series_reduction


def generate_series_binop(binop):
    @overload(binop)
    def series_binop(series1, value):
        if isinstance(series1, SeriesType):
            if isinstance(value, SeriesType):

                def series_binop_impl(series1, series2):
                    # TODO: Check index matching?
                    return Series(
                        binop(series1.values, series2.values),
                        series1.index,
                        series1.name,
                    )

                return series_binop_impl
            else:

                def series_binop_impl(series1, value):
                    return Series(
                        binop(series1.values, value), series1.index, series1.name
                    )

                return series_binop_impl

    return series_binop


series_reductions = [
    ("sum", np.sum),
    ("mean", np.mean),
    # Disabled due to discrepancies between numba std. dev
    # and pandas std. dev (no way to specify dof)
    # ("std", np.std),
    # ("var", np.var),
    ("min", np.min),
    ("max", np.max),
]
for reduction, reduction_method in series_reductions:
    generate_series_reduction(reduction, reduction_method)

series_binops = [operator.add, operator.sub, operator.mul, operator.truediv]

for ser_binop in series_binops:
    generate_series_binop(ser_binop)


# get_loc on Index
@overload_method(IndexType, "get_loc")
def index_get_loc(index, item):
    def index_get_loc_impl(index, item):
        # Initialize the hash table if not initialized
        if len(index.hashmap) == 0:
            for i, val in enumerate(index._data):
                index.hashmap[val] = i
        return index.hashmap[item]

    return index_get_loc_impl


# Indexing for Series/Index
@overload(operator.getitem)
def series_indexing(series, item):
    if isinstance(series, SeriesType):

        def series_getitem(series, item):
            loc = series.index.get_loc(item)
            return series.iloc[loc]

        return series_getitem


@overload(operator.getitem)
def index_indexing(index, idx):
    if isinstance(index, IndexType):

        def index_getitem(index, idx):
            return index._data[idx]

        return index_getitem


class IlocType(types.Type):
    def __init__(self, obj_type) -> None:
        self.obj_type = obj_type
        name = f"iLocIndexer({obj_type})"
        super().__init__(name=name)

    @property
    def key(self):
        return self.obj_type


@typeof_impl.register(_iLocIndexer)
def typeof_iloc(val, c):
    objtype = typeof_impl(val.obj, c)
    return IlocType(objtype)


@type_callable(_iLocIndexer)
def type_iloc_constructor(context):
    def typer(obj):
        if isinstance(obj, SeriesType):
            return IlocType(obj)

    return typer


@lower_builtin(_iLocIndexer, SeriesType)
def iloc_constructor(context, builder, sig, args):
    (obj,) = args
    iloc_indexer = cgutils.create_struct_proxy(sig.return_type)(context, builder)
    iloc_indexer.obj = obj
    return impl_ret_borrowed(
        context, builder, sig.return_type, iloc_indexer._getvalue()
    )


@register_model(IlocType)
class ILocModel(models.StructModel):
    def __init__(self, dmm, fe_type) -> None:
        members = [("obj", fe_type.obj_type)]
        models.StructModel.__init__(self, dmm, fe_type, members)


make_attribute_wrapper(IlocType, "obj", "obj")


@overload_attribute(SeriesType, "iloc")
def series_iloc(series):
    def get(series):
        return _iLocIndexer(series)

    return get


@overload(operator.getitem)
def iloc_getitem(iloc_indexer, i):
    if isinstance(iloc_indexer, IlocType):

        def getitem_impl(iloc_indexer, i):
            return iloc_indexer.obj.values[i]

        return getitem_impl
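
In practice these registrations are exercised when a user function is compiled by numba and handed real pandas objects. A rough illustration, not part of the commit (assuming a pandas version whose DataFrame.apply accepts engine="numba"; the frame and function are made-up):

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})

# Each row arrives in the compiled function as a Series; .sum() and row["a"]
# resolve through the SeriesType / IndexType overloads registered above
def f(row):
    return row.sum() + row["a"]

out = df.apply(f, axis=1, engine="numba", raw=False)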
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py ADDED
@@ -0,0 +1,27 @@

from pandas.core._numba.kernels.mean_ import (
    grouped_mean,
    sliding_mean,
)
from pandas.core._numba.kernels.min_max_ import (
    grouped_min_max,
    sliding_min_max,
)
from pandas.core._numba.kernels.sum_ import (
    grouped_sum,
    sliding_sum,
)
from pandas.core._numba.kernels.var_ import (
    grouped_var,
    sliding_var,
)

__all__ = [
    "sliding_mean",
    "grouped_mean",
    "sliding_sum",
    "grouped_sum",
    "sliding_var",
    "grouped_var",
    "sliding_min_max",
    "grouped_min_max",
]
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (623 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/mean_.cpython-310.pyc ADDED
Binary file (3.49 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/min_max_.cpython-310.pyc ADDED
Binary file (2.45 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/shared.cpython-310.pyc ADDED
Binary file (782 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/sum_.cpython-310.pyc ADDED
Binary file (3.81 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/__pycache__/var_.cpython-310.pyc ADDED
Binary file (3.95 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py ADDED
@@ -0,0 +1,196 @@

"""
Numba 1D mean kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numba
import numpy as np

from pandas.core._numba.kernels.shared import is_monotonic_increasing
from pandas.core._numba.kernels.sum_ import grouped_kahan_sum

if TYPE_CHECKING:
    from pandas._typing import npt


@numba.jit(nopython=True, nogil=True, parallel=False)
def add_mean(
    val: float,
    nobs: int,
    sum_x: float,
    neg_ct: int,
    compensation: float,
    num_consecutive_same_value: int,
    prev_value: float,
) -> tuple[int, float, int, float, int, float]:
    if not np.isnan(val):
        nobs += 1
        y = val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
        if val < 0:
            neg_ct += 1

        if val == prev_value:
            num_consecutive_same_value += 1
        else:
            num_consecutive_same_value = 1
        prev_value = val

    return nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value


@numba.jit(nopython=True, nogil=True, parallel=False)
def remove_mean(
    val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float
) -> tuple[int, float, int, float]:
    if not np.isnan(val):
        nobs -= 1
        y = -val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
        if val < 0:
            neg_ct -= 1
    return nobs, sum_x, neg_ct, compensation


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_mean(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    N = len(start)
    nobs = 0
    sum_x = 0.0
    neg_ct = 0
    compensation_add = 0.0
    compensation_remove = 0.0

    is_monotonic_increasing_bounds = is_monotonic_increasing(
        start
    ) and is_monotonic_increasing(end)

    output = np.empty(N, dtype=result_dtype)

    for i in range(N):
        s = start[i]
        e = end[i]
        if i == 0 or not is_monotonic_increasing_bounds:
            prev_value = values[s]
            num_consecutive_same_value = 0

            for j in range(s, e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_mean(
                    val,
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                )
        else:
            for j in range(start[i - 1], s):
                val = values[j]
                nobs, sum_x, neg_ct, compensation_remove = remove_mean(
                    val, nobs, sum_x, neg_ct, compensation_remove
                )

            for j in range(end[i - 1], e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_mean(
                    val,
                    nobs,
                    sum_x,
                    neg_ct,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,  # pyright: ignore[reportGeneralTypeIssues]
                )

        if nobs >= min_periods and nobs > 0:
            result = sum_x / nobs
            if num_consecutive_same_value >= nobs:
                result = prev_value
            elif neg_ct == 0 and result < 0:
                result = 0
            elif neg_ct == nobs and result > 0:
                result = 0
        else:
            result = np.nan

        output[i] = result

        if not is_monotonic_increasing_bounds:
            nobs = 0
            sum_x = 0.0
            neg_ct = 0
            compensation_remove = 0.0

    # na_position is empty list since float64 can already hold nans
    # Do list comprehension, since numba cannot figure out that na_pos is
    # empty list of ints on its own
    na_pos = [0 for i in range(0)]
    return output, na_pos


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_mean(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(
        values, result_dtype, labels, ngroups
    )

    # Post-processing, replace sums that don't satisfy min_periods
    for lab in range(ngroups):
        nobs = nobs_arr[lab]
        num_consecutive_same_value = consecutive_counts[lab]
        prev_value = prev_vals[lab]
        sum_x = output[lab]
        if nobs >= min_periods:
            if num_consecutive_same_value >= nobs:
                result = prev_value * nobs
            else:
                result = sum_x
        else:
            result = np.nan
        result /= nobs
        output[lab] = result

    # na_position is empty list since float64 can already hold nans
    # Do list comprehension, since numba cannot figure out that na_pos is
    # empty list of ints on its own
    na_pos = [0 for i in range(0)]
    return output, na_pos
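
A direct call of the sliding kernel, for reference and not part of the committed file (the first call triggers a JIT compile; inputs are made-up, and result_dtype is passed as a numpy scalar type, as in the mappings in executor.py):

import numpy as np
from pandas.core._numba.kernels import sliding_mean

values = np.array([1.0, 2.0, np.nan, 4.0])
start = np.array([0, 0, 1, 2], dtype=np.int64)
end = np.array([1, 2, 3, 4], dtype=np.int64)
out, na_pos = sliding_mean(values, np.float64, start, end, 1)  # min_periods=1
# out -> [1.0, 1.5, 2.0, 4.0]; na_pos stays empty because float64 holds NaN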
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py ADDED
@@ -0,0 +1,125 @@

"""
Numba 1D min/max kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numba
import numpy as np

if TYPE_CHECKING:
    from pandas._typing import npt


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_min_max(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
    is_max: bool,
) -> tuple[np.ndarray, list[int]]:
    N = len(start)
    nobs = 0
    output = np.empty(N, dtype=result_dtype)
    na_pos = []
    # Use deque once numba supports it
    # https://github.com/numba/numba/issues/7417
    Q: list = []
    W: list = []
    for i in range(N):
        curr_win_size = end[i] - start[i]
        if i == 0:
            st = start[i]
        else:
            st = end[i - 1]

        for k in range(st, end[i]):
            ai = values[k]
            if not np.isnan(ai):
                nobs += 1
            elif is_max:
                ai = -np.inf
            else:
                ai = np.inf
            # Discard previous entries if we find new min or max
            if is_max:
                while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            else:
                while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            Q.append(k)
            W.append(k)

        # Discard entries outside and left of current window
        while Q and Q[0] <= start[i] - 1:
            Q.pop(0)
        while W and W[0] <= start[i] - 1:
            if not np.isnan(values[W[0]]):
                nobs -= 1
            W.pop(0)

        # Save output based on index in input value array
        if Q and curr_win_size > 0 and nobs >= min_periods:
            output[i] = values[Q[0]]
        else:
            if values.dtype.kind != "i":
                output[i] = np.nan
            else:
                na_pos.append(i)

    return output, na_pos


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_min_max(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
    is_max: bool,
) -> tuple[np.ndarray, list[int]]:
    N = len(labels)
    nobs = np.zeros(ngroups, dtype=np.int64)
    na_pos = []
    output = np.empty(ngroups, dtype=result_dtype)

    for i in range(N):
        lab = labels[i]
        val = values[i]
        if lab < 0:
            continue

        if values.dtype.kind == "i" or not np.isnan(val):
            nobs[lab] += 1
        else:
            # NaN value cannot be a min/max value
            continue

        if nobs[lab] == 1:
            # First element in group, set output equal to this
            output[lab] = val
            continue

        if is_max:
            if val > output[lab]:
                output[lab] = val
        else:
            if val < output[lab]:
                output[lab] = val

    # Set labels that don't satisfy min_periods as np.nan
    for lab, count in enumerate(nobs):
        if count < min_periods:
            na_pos.append(lab)

    return output, na_pos
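
A direct call of the grouped kernel, for reference and not part of the committed file (made-up inputs):

import numpy as np
from pandas.core._numba.kernels import grouped_min_max

values = np.array([3.0, 1.0, 2.0, np.nan])
labels = np.array([0, 0, 1, 1], dtype=np.intp)
# ngroups=2, min_periods=1, is_max=True
out, na_pos = grouped_min_max(values, np.float64, labels, 2, 1, True)
# out -> [3.0, 2.0]; the NaN in group 1 is skipped and na_pos stays empty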
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py ADDED
@@ -0,0 +1,29 @@

from __future__ import annotations

from typing import TYPE_CHECKING

import numba

if TYPE_CHECKING:
    import numpy as np


@numba.jit(
    # error: Any? not callable
    numba.boolean(numba.int64[:]),  # type: ignore[misc]
    nopython=True,
    nogil=True,
    parallel=False,
)
def is_monotonic_increasing(bounds: np.ndarray) -> bool:
    """Check if int64 values are monotonically increasing."""
    n = len(bounds)
    if n < 2:
        return True
    prev = bounds[0]
    for i in range(1, n):
        cur = bounds[i]
        if cur < prev:
            return False
        prev = cur
    return True
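
Because the signature numba.boolean(numba.int64[:]) is given explicitly, this helper is compiled eagerly at import time and accepts only int64 arrays. A reference call (not part of the committed file):

import numpy as np
from pandas.core._numba.kernels.shared import is_monotonic_increasing

is_monotonic_increasing(np.array([0, 2, 2, 5], dtype=np.int64))  # True
is_monotonic_increasing(np.array([0, 3, 1], dtype=np.int64))  # False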
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py ADDED
@@ -0,0 +1,244 @@

"""
Numba 1D sum kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding

Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
)

import numba
from numba.extending import register_jitable
import numpy as np

if TYPE_CHECKING:
    from pandas._typing import npt

from pandas.core._numba.kernels.shared import is_monotonic_increasing


@numba.jit(nopython=True, nogil=True, parallel=False)
def add_sum(
    val: Any,
    nobs: int,
    sum_x: Any,
    compensation: Any,
    num_consecutive_same_value: int,
    prev_value: Any,
) -> tuple[int, Any, Any, int, Any]:
    if not np.isnan(val):
        nobs += 1
        y = val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t

        if val == prev_value:
            num_consecutive_same_value += 1
        else:
            num_consecutive_same_value = 1
        prev_value = val

    return nobs, sum_x, compensation, num_consecutive_same_value, prev_value


@numba.jit(nopython=True, nogil=True, parallel=False)
def remove_sum(
    val: Any, nobs: int, sum_x: Any, compensation: Any
) -> tuple[int, Any, Any]:
    if not np.isnan(val):
        nobs -= 1
        y = -val - compensation
        t = sum_x + y
        compensation = t - sum_x - y
        sum_x = t
    return nobs, sum_x, compensation


@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_sum(
    values: np.ndarray,
    result_dtype: np.dtype,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    dtype = values.dtype

    na_val: object = np.nan
    if dtype.kind == "i":
        na_val = 0

    N = len(start)
    nobs = 0
    sum_x = 0
    compensation_add = 0
    compensation_remove = 0
    na_pos = []

    is_monotonic_increasing_bounds = is_monotonic_increasing(
        start
    ) and is_monotonic_increasing(end)

    output = np.empty(N, dtype=result_dtype)

    for i in range(N):
        s = start[i]
        e = end[i]
        if i == 0 or not is_monotonic_increasing_bounds:
            prev_value = values[s]
            num_consecutive_same_value = 0

            for j in range(s, e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_sum(
                    val,
                    nobs,
                    sum_x,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                )
        else:
            for j in range(start[i - 1], s):
                val = values[j]
                nobs, sum_x, compensation_remove = remove_sum(
                    val, nobs, sum_x, compensation_remove
                )

            for j in range(end[i - 1], e):
                val = values[j]
                (
                    nobs,
                    sum_x,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                ) = add_sum(
                    val,
                    nobs,
                    sum_x,
                    compensation_add,
                    num_consecutive_same_value,
                    prev_value,
                )

        if nobs == 0 == min_periods:
            result: object = 0
        elif nobs >= min_periods:
            if num_consecutive_same_value >= nobs:
                result = prev_value * nobs
            else:
                result = sum_x
        else:
            result = na_val
            if dtype.kind == "i":
                na_pos.append(i)

        output[i] = result

        if not is_monotonic_increasing_bounds:
            nobs = 0
            sum_x = 0
            compensation_remove = 0

    return output, na_pos


# Mypy/pyright don't like the fact that the decorator is untyped
@register_jitable  # type: ignore[misc]
def grouped_kahan_sum(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
) -> tuple[
    np.ndarray, npt.NDArray[np.int64], np.ndarray, npt.NDArray[np.int64], np.ndarray
]:
    N = len(labels)

    nobs_arr = np.zeros(ngroups, dtype=np.int64)
    comp_arr = np.zeros(ngroups, dtype=values.dtype)
    consecutive_counts = np.zeros(ngroups, dtype=np.int64)
    prev_vals = np.zeros(ngroups, dtype=values.dtype)
    output = np.zeros(ngroups, dtype=result_dtype)

    for i in range(N):
        lab = labels[i]
        val = values[i]

        if lab < 0:
            continue

        sum_x = output[lab]
        nobs = nobs_arr[lab]
        compensation_add = comp_arr[lab]
        num_consecutive_same_value = consecutive_counts[lab]
        prev_value = prev_vals[lab]

        (
            nobs,
            sum_x,
            compensation_add,
            num_consecutive_same_value,
            prev_value,
        ) = add_sum(
            val,
            nobs,
            sum_x,
            compensation_add,
            num_consecutive_same_value,
            prev_value,
        )

        output[lab] = sum_x
        consecutive_counts[lab] = num_consecutive_same_value
        prev_vals[lab] = prev_value
        comp_arr[lab] = compensation_add
        nobs_arr[lab] = nobs
    return output, nobs_arr, comp_arr, consecutive_counts, prev_vals


@numba.jit(nopython=True, nogil=True, parallel=False)
def grouped_sum(
    values: np.ndarray,
    result_dtype: np.dtype,
    labels: npt.NDArray[np.intp],
    ngroups: int,
    min_periods: int,
) -> tuple[np.ndarray, list[int]]:
    na_pos = []

    output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum(
        values, result_dtype, labels, ngroups
    )

    # Post-processing, replace sums that don't satisfy min_periods
    for lab in range(ngroups):
        nobs = nobs_arr[lab]
        num_consecutive_same_value = consecutive_counts[lab]
        prev_value = prev_vals[lab]
        sum_x = output[lab]
        if nobs >= min_periods:
            if num_consecutive_same_value >= nobs:
                result = prev_value * nobs
            else:
                result = sum_x
        else:
            result = sum_x  # Don't change val, will be replaced by nan later
            na_pos.append(lab)
        output[lab] = result

    return output, na_pos
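
A direct call showing the integer na_pos path, for reference and not part of the committed file (made-up inputs):

import numpy as np
from pandas.core._numba.kernels import grouped_sum

values = np.array([1, 2, 3], dtype=np.int64)
labels = np.array([0, 0, 1], dtype=np.intp)
# ngroups=2, min_periods=2
out, na_pos = grouped_sum(values, np.int64, labels, 2, 2)
# out -> [3, 3]; na_pos -> [1] because group 1 has a single observation and
# int64 cannot hold NaN; looper_wrapper in executor.py casts and masks later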
env-llmeval/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Numba 1D var kernels that can be shared by
3
+ * Dataframe / Series
4
+ * groupby
5
+ * rolling / expanding
6
+
7
+ Mirrors pandas/_libs/window/aggregation.pyx
8
+ """
9
+ from __future__ import annotations
10
+
11
+ from typing import TYPE_CHECKING
12
+
13
+ import numba
14
+ import numpy as np
15
+
16
+ if TYPE_CHECKING:
17
+ from pandas._typing import npt
18
+
19
+ from pandas.core._numba.kernels.shared import is_monotonic_increasing
20
+
21
+
22
+ @numba.jit(nopython=True, nogil=True, parallel=False)
23
+ def add_var(
24
+ val: float,
25
+ nobs: int,
26
+ mean_x: float,
27
+ ssqdm_x: float,
28
+ compensation: float,
29
+ num_consecutive_same_value: int,
30
+ prev_value: float,
31
+ ) -> tuple[int, float, float, float, int, float]:
32
+ if not np.isnan(val):
33
+ if val == prev_value:
34
+ num_consecutive_same_value += 1
35
+ else:
36
+ num_consecutive_same_value = 1
37
+ prev_value = val
38
+
39
+ nobs += 1
40
+ prev_mean = mean_x - compensation
41
+ y = val - compensation
42
+ t = y - mean_x
43
+ compensation = t + mean_x - y
44
+ delta = t
45
+ if nobs:
46
+ mean_x += delta / nobs
47
+ else:
48
+ mean_x = 0
49
+ ssqdm_x += (val - prev_mean) * (val - mean_x)
50
+ return nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value
+
+
+ @numba.jit(nopython=True, nogil=True, parallel=False)
+ def remove_var(
+     val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float
+ ) -> tuple[int, float, float, float]:
+     if not np.isnan(val):
+         nobs -= 1
+         if nobs:
+             prev_mean = mean_x - compensation
+             y = val - compensation
+             t = y - mean_x
+             compensation = t + mean_x - y
+             delta = t
+             mean_x -= delta / nobs
+             ssqdm_x -= (val - prev_mean) * (val - mean_x)
+         else:
+             mean_x = 0
+             ssqdm_x = 0
+     return nobs, mean_x, ssqdm_x, compensation
+
+
+ @numba.jit(nopython=True, nogil=True, parallel=False)
+ def sliding_var(
+     values: np.ndarray,
+     result_dtype: np.dtype,
+     start: np.ndarray,
+     end: np.ndarray,
+     min_periods: int,
+     ddof: int = 1,
+ ) -> tuple[np.ndarray, list[int]]:
+     N = len(start)
+     nobs = 0
+     mean_x = 0.0
+     ssqdm_x = 0.0
+     compensation_add = 0.0
+     compensation_remove = 0.0
+
+     min_periods = max(min_periods, 1)
+     is_monotonic_increasing_bounds = is_monotonic_increasing(
+         start
+     ) and is_monotonic_increasing(end)
+
+     output = np.empty(N, dtype=result_dtype)
+
+     for i in range(N):
+         s = start[i]
+         e = end[i]
+         if i == 0 or not is_monotonic_increasing_bounds:
+             prev_value = values[s]
+             num_consecutive_same_value = 0
+
+             for j in range(s, e):
+                 val = values[j]
+                 (
+                     nobs,
+                     mean_x,
+                     ssqdm_x,
+                     compensation_add,
+                     num_consecutive_same_value,
+                     prev_value,
+                 ) = add_var(
+                     val,
+                     nobs,
+                     mean_x,
+                     ssqdm_x,
+                     compensation_add,
+                     num_consecutive_same_value,
+                     prev_value,
+                 )
+         else:
+             for j in range(start[i - 1], s):
+                 val = values[j]
+                 nobs, mean_x, ssqdm_x, compensation_remove = remove_var(
+                     val, nobs, mean_x, ssqdm_x, compensation_remove
+                 )
+
+             for j in range(end[i - 1], e):
+                 val = values[j]
+                 (
+                     nobs,
+                     mean_x,
+                     ssqdm_x,
+                     compensation_add,
+                     num_consecutive_same_value,
+                     prev_value,
+                 ) = add_var(
+                     val,
+                     nobs,
+                     mean_x,
+                     ssqdm_x,
+                     compensation_add,
+                     num_consecutive_same_value,
+                     prev_value,
+                 )
+
+         if nobs >= min_periods and nobs > ddof:
+             if nobs == 1 or num_consecutive_same_value >= nobs:
+                 result = 0.0
+             else:
+                 result = ssqdm_x / (nobs - ddof)
+         else:
+             result = np.nan
+
+         output[i] = result
+
+         if not is_monotonic_increasing_bounds:
+             nobs = 0
+             mean_x = 0.0
+             ssqdm_x = 0.0
+             compensation_remove = 0.0
+
+     # na_position is an empty list, since float64 can already hold NaNs.
+     # Use a list comprehension, since numba cannot infer on its own that
+     # na_pos is an empty list of ints.
+     na_pos = [0 for i in range(0)]
+     return output, na_pos
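
Editor's note: a minimal usage sketch for sliding_var (internal API; requires numba and may change between pandas versions). The start/end arrays give per-window bounds, as pandas' rolling machinery would pass them:

    import numpy as np
    from pandas.core._numba.kernels import sliding_var  # internal, unstable

    values = np.array([1.0, 2.0, 4.0, 7.0])
    start = np.array([0, 0, 1, 2], dtype=np.int64)  # fixed window of size 2
    end = np.array([1, 2, 3, 4], dtype=np.int64)
    out, _ = sliding_var(values, np.dtype("float64"), start, end, 2)
    # out is approximately [nan, 0.5, 2.0, 4.5],
    # matching pd.Series(values).rolling(2).var()
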
+
+
+ @numba.jit(nopython=True, nogil=True, parallel=False)
+ def grouped_var(
+     values: np.ndarray,
+     result_dtype: np.dtype,
+     labels: npt.NDArray[np.intp],
+     ngroups: int,
+     min_periods: int,
+     ddof: int = 1,
+ ) -> tuple[np.ndarray, list[int]]:
+     N = len(labels)
+
+     nobs_arr = np.zeros(ngroups, dtype=np.int64)
+     comp_arr = np.zeros(ngroups, dtype=values.dtype)
+     consecutive_counts = np.zeros(ngroups, dtype=np.int64)
+     prev_vals = np.zeros(ngroups, dtype=values.dtype)
+     output = np.zeros(ngroups, dtype=result_dtype)
+     means = np.zeros(ngroups, dtype=result_dtype)
+
+     for i in range(N):
+         lab = labels[i]
+         val = values[i]
+
+         if lab < 0:
+             continue
+
+         mean_x = means[lab]
+         ssqdm_x = output[lab]
+         nobs = nobs_arr[lab]
+         compensation_add = comp_arr[lab]
+         num_consecutive_same_value = consecutive_counts[lab]
+         prev_value = prev_vals[lab]
+
+         (
+             nobs,
+             mean_x,
+             ssqdm_x,
+             compensation_add,
+             num_consecutive_same_value,
+             prev_value,
+         ) = add_var(
+             val,
+             nobs,
+             mean_x,
+             ssqdm_x,
+             compensation_add,
+             num_consecutive_same_value,
+             prev_value,
+         )
+
+         output[lab] = ssqdm_x
+         means[lab] = mean_x
+         consecutive_counts[lab] = num_consecutive_same_value
+         prev_vals[lab] = prev_value
+         comp_arr[lab] = compensation_add
+         nobs_arr[lab] = nobs
+
+     # Post-processing: replace vars that don't satisfy min_periods
+     for lab in range(ngroups):
+         nobs = nobs_arr[lab]
+         num_consecutive_same_value = consecutive_counts[lab]
+         ssqdm_x = output[lab]
+         if nobs >= min_periods and nobs > ddof:
+             if nobs == 1 or num_consecutive_same_value >= nobs:
+                 result = 0.0
+             else:
+                 result = ssqdm_x / (nobs - ddof)
+         else:
+             result = np.nan
+         output[lab] = result
+
+     # na_position is an empty list, since float64 can already hold NaNs.
+     # Use a list comprehension, since numba cannot infer on its own that
+     # na_pos is an empty list of ints.
+     na_pos = [0 for i in range(0)]
+     return output, na_pos
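
Editor's note: grouped_var is normally reached through the public groupby API when the numba engine is requested; a hedged sketch (requires numba installed; should agree with the default Cython engine):

    import pandas as pd

    df = pd.DataFrame({"key": ["a", "a", "b", "b"], "x": [1.0, 3.0, 2.0, 6.0]})
    print(df.groupby("key")["x"].var(engine="numba"))
    # a    2.0
    # b    8.0
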
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc ADDED
Binary file (3.18 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc ADDED
Binary file (14.5 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc ADDED
Binary file (74.7 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc ADDED
Binary file (10.5 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc ADDED
Binary file (63.4 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc ADDED
Binary file (70.6 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc ADDED
Binary file (47.6 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc ADDED
Binary file (17 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc ADDED
Binary file (30.3 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py ADDED
@@ -0,0 +1,1929 @@
+ """
+ SparseArray data structure
+ """
+ from __future__ import annotations
+
+ from collections import abc
+ import numbers
+ import operator
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     Literal,
+     cast,
+     overload,
+ )
+ import warnings
+
+ import numpy as np
+
+ from pandas._libs import lib
+ import pandas._libs.sparse as splib
+ from pandas._libs.sparse import (
+     BlockIndex,
+     IntIndex,
+     SparseIndex,
+ )
+ from pandas._libs.tslibs import NaT
+ from pandas.compat.numpy import function as nv
+ from pandas.errors import PerformanceWarning
+ from pandas.util._decorators import doc
+ from pandas.util._exceptions import find_stack_level
+ from pandas.util._validators import (
+     validate_bool_kwarg,
+     validate_insert_loc,
+ )
+
+ from pandas.core.dtypes.astype import astype_array
+ from pandas.core.dtypes.cast import (
+     construct_1d_arraylike_from_scalar,
+     find_common_type,
+     maybe_box_datetimelike,
+ )
+ from pandas.core.dtypes.common import (
+     is_bool_dtype,
+     is_integer,
+     is_list_like,
+     is_object_dtype,
+     is_scalar,
+     is_string_dtype,
+     pandas_dtype,
+ )
+ from pandas.core.dtypes.dtypes import (
+     DatetimeTZDtype,
+     SparseDtype,
+ )
+ from pandas.core.dtypes.generic import (
+     ABCIndex,
+     ABCSeries,
+ )
+ from pandas.core.dtypes.missing import (
+     isna,
+     na_value_for_dtype,
+     notna,
+ )
+
+ from pandas.core import arraylike
+ import pandas.core.algorithms as algos
+ from pandas.core.arraylike import OpsMixin
+ from pandas.core.arrays import ExtensionArray
+ from pandas.core.base import PandasObject
+ import pandas.core.common as com
+ from pandas.core.construction import (
+     ensure_wrapped_if_datetimelike,
+     extract_array,
+     sanitize_array,
+ )
+ from pandas.core.indexers import (
+     check_array_indexer,
+     unpack_tuple_and_ellipses,
+ )
+ from pandas.core.nanops import check_below_min_count
+
+ from pandas.io.formats import printing
+
+ # See https://github.com/python/typing/issues/684
+ if TYPE_CHECKING:
+     from collections.abc import Sequence
+     from enum import Enum
+
+     class ellipsis(Enum):
+         Ellipsis = "..."
+
+     Ellipsis = ellipsis.Ellipsis
+
+     from scipy.sparse import spmatrix
+
+     from pandas._typing import (
+         FillnaOptions,
+         NumpySorter,
+     )
+
+     SparseIndexKind = Literal["integer", "block"]
+
+     from pandas._typing import (
+         ArrayLike,
+         AstypeArg,
+         Axis,
+         AxisInt,
+         Dtype,
+         NpDtype,
+         PositionalIndexer,
+         Scalar,
+         ScalarIndexer,
+         Self,
+         SequenceIndexer,
+         npt,
+     )
+
+     from pandas import Series
+
+ else:
+     ellipsis = type(Ellipsis)
+
+
+ # ----------------------------------------------------------------------------
+ # Array
+
+ _sparray_doc_kwargs = {"klass": "SparseArray"}
+
+
+ def _get_fill(arr: SparseArray) -> np.ndarray:
+     """
+     Create a 0-dim ndarray containing the fill value
+
+     Parameters
+     ----------
+     arr : SparseArray
+
+     Returns
+     -------
+     fill_value : ndarray
+         0-dim ndarray with just the fill value.
+
+     Notes
+     -----
+     coerce fill_value to arr dtype if possible
+     int64 SparseArray can have NaN as fill_value if there is no missing
+     """
+     try:
+         return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
+     except ValueError:
+         return np.asarray(arr.fill_value)
+
+
+ def _sparse_array_op(
+     left: SparseArray, right: SparseArray, op: Callable, name: str
+ ) -> SparseArray:
+     """
+     Perform a binary operation between two arrays.
+
+     Parameters
+     ----------
+     left : Union[SparseArray, ndarray]
+     right : Union[SparseArray, ndarray]
+     op : Callable
+         The binary operation to perform
+     name : str
+         Name of the callable.
+
+     Returns
+     -------
+     SparseArray
+     """
+     if name.startswith("__"):
+         # For lookups in _libs.sparse we need non-dunder op name
+         name = name[2:-2]
+
+     # dtype used to find corresponding sparse method
+     ltype = left.dtype.subtype
+     rtype = right.dtype.subtype
+
+     if ltype != rtype:
+         subtype = find_common_type([ltype, rtype])
+         ltype = SparseDtype(subtype, left.fill_value)
+         rtype = SparseDtype(subtype, right.fill_value)
+
+         left = left.astype(ltype, copy=False)
+         right = right.astype(rtype, copy=False)
+         dtype = ltype.subtype
+     else:
+         dtype = ltype
+
+     # dtype the result must have
+     result_dtype = None
+
+     if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
+         with np.errstate(all="ignore"):
+             result = op(left.to_dense(), right.to_dense())
+             fill = op(_get_fill(left), _get_fill(right))
+
+         if left.sp_index.ngaps == 0:
+             index = left.sp_index
+         else:
+             index = right.sp_index
+     elif left.sp_index.equals(right.sp_index):
+         with np.errstate(all="ignore"):
+             result = op(left.sp_values, right.sp_values)
+             fill = op(_get_fill(left), _get_fill(right))
+         index = left.sp_index
+     else:
+         if name[0] == "r":
+             left, right = right, left
+             name = name[1:]
+
+         if name in ("and", "or", "xor") and dtype == "bool":
+             opname = f"sparse_{name}_uint8"
+             # to make template simple, cast here
+             left_sp_values = left.sp_values.view(np.uint8)
+             right_sp_values = right.sp_values.view(np.uint8)
+             result_dtype = bool
+         else:
+             opname = f"sparse_{name}_{dtype}"
+             left_sp_values = left.sp_values
+             right_sp_values = right.sp_values
+
+         if (
+             name in ["floordiv", "mod"]
+             and (right == 0).any()
+             and left.dtype.kind in "iu"
+         ):
+             # Match the non-Sparse Series behavior
+             opname = f"sparse_{name}_float64"
+             left_sp_values = left_sp_values.astype("float64")
+             right_sp_values = right_sp_values.astype("float64")
+
+         sparse_op = getattr(splib, opname)
+
+         with np.errstate(all="ignore"):
+             result, index, fill = sparse_op(
+                 left_sp_values,
+                 left.sp_index,
+                 left.fill_value,
+                 right_sp_values,
+                 right.sp_index,
+                 right.fill_value,
+             )
+
+     if name == "divmod":
+         # result is a 2-tuple
+         # error: Incompatible return value type (got "Tuple[SparseArray,
+         # SparseArray]", expected "SparseArray")
+         return (  # type: ignore[return-value]
+             _wrap_result(name, result[0], index, fill[0], dtype=result_dtype),
+             _wrap_result(name, result[1], index, fill[1], dtype=result_dtype),
+         )
+
+     if result_dtype is None:
+         result_dtype = result.dtype
+
+     return _wrap_result(name, result, index, fill, dtype=result_dtype)
+
+
+ def _wrap_result(
+     name: str, data, sparse_index, fill_value, dtype: Dtype | None = None
+ ) -> SparseArray:
+     """
+     wrap op result to have correct dtype
+     """
+     if name.startswith("__"):
+         # e.g. __eq__ --> eq
+         name = name[2:-2]
+
+     if name in ("eq", "ne", "lt", "gt", "le", "ge"):
+         dtype = bool
+
+     fill_value = lib.item_from_zerodim(fill_value)
+
+     if is_bool_dtype(dtype):
+         # fill_value may be np.bool_
+         fill_value = bool(fill_value)
+     return SparseArray(
+         data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype
+     )
+
+
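Editor's note: _sparse_array_op and _wrap_result back SparseArray's arithmetic and comparison dunders (via OpsMixin). A short sketch of the behavior they produce through the public API (output values are illustrative, per pandas 2.x):

    import pandas as pd

    a = pd.arrays.SparseArray([0, 0, 1, 2])
    b = pd.arrays.SparseArray([0, 1, 0, 2])
    # Where possible, the op runs only on sp_values and the two fill values,
    # and the result is re-wrapped as a SparseArray with fill op(fill, fill).
    print((a + b).fill_value)   # 0
    print((a == b).dtype)       # Sparse[bool, True] -- comparisons coerce to bool
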
+ class SparseArray(OpsMixin, PandasObject, ExtensionArray):
+     """
+     An ExtensionArray for storing sparse data.
+
+     Parameters
+     ----------
+     data : array-like or scalar
+         A dense array of values to store in the SparseArray. This may contain
+         `fill_value`.
+     sparse_index : SparseIndex, optional
+     fill_value : scalar, optional
+         Elements in data that are ``fill_value`` are not stored in the
+         SparseArray. For memory savings, this should be the most common value
+         in `data`. By default, `fill_value` depends on the dtype of `data`:
+
+         =========== ==========
+         data.dtype  na_value
+         =========== ==========
+         float       ``np.nan``
+         int         ``0``
+         bool        False
+         datetime64  ``pd.NaT``
+         timedelta64 ``pd.NaT``
+         =========== ==========
+
+         The fill value is potentially specified in three ways. In order of
+         precedence, these are
+
+         1. The `fill_value` argument
+         2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
+            a ``SparseDtype``
+         3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
+            is not a ``SparseDtype`` and `data` is a ``SparseArray``.
+
+     kind : str
+         Can be 'integer' or 'block', default is 'integer'.
+         The type of storage for sparse locations.
+
+         * 'block': Stores a `block` and `block_length` for each
+           contiguous *span* of sparse values. This is best when
+           sparse data tends to be clumped together, with large
+           regions of ``fill-value`` values between sparse values.
+         * 'integer': uses an integer to store the location of
+           each sparse value.
+
+     dtype : np.dtype or SparseDtype, optional
+         The dtype to use for the SparseArray. For numpy dtypes, this
+         determines the dtype of ``self.sp_values``. For SparseDtype,
+         this determines ``self.sp_values`` and ``self.fill_value``.
+     copy : bool, default False
+         Whether to explicitly copy the incoming `data` array.
+
+     Attributes
+     ----------
+     None
+
+     Methods
+     -------
+     None
+
+     Examples
+     --------
+     >>> from pandas.arrays import SparseArray
+     >>> arr = SparseArray([0, 0, 1, 2])
+     >>> arr
+     [0, 0, 1, 2]
+     Fill: 0
+     IntIndex
+     Indices: array([2, 3], dtype=int32)
+     """
+
+     _subtyp = "sparse_array"  # register ABCSparseArray
+     _hidden_attrs = PandasObject._hidden_attrs | frozenset([])
+     _sparse_index: SparseIndex
+     _sparse_values: np.ndarray
+     _dtype: SparseDtype
+
+     def __init__(
+         self,
+         data,
+         sparse_index=None,
+         fill_value=None,
+         kind: SparseIndexKind = "integer",
+         dtype: Dtype | None = None,
+         copy: bool = False,
+     ) -> None:
+         if fill_value is None and isinstance(dtype, SparseDtype):
+             fill_value = dtype.fill_value
+
+         if isinstance(data, type(self)):
+             # disable normal inference on dtype, sparse_index, & fill_value
+             if sparse_index is None:
+                 sparse_index = data.sp_index
+             if fill_value is None:
+                 fill_value = data.fill_value
+             if dtype is None:
+                 dtype = data.dtype
+             # TODO: make kind=None, and use data.kind?
+             data = data.sp_values
+
+         # Handle user-provided dtype
+         if isinstance(dtype, str):
+             # Two options: dtype='int', regular numpy dtype
+             # or dtype='Sparse[int]', a sparse dtype
+             try:
+                 dtype = SparseDtype.construct_from_string(dtype)
+             except TypeError:
+                 dtype = pandas_dtype(dtype)
+
+         if isinstance(dtype, SparseDtype):
+             if fill_value is None:
+                 fill_value = dtype.fill_value
+             dtype = dtype.subtype
+
+         if is_scalar(data):
+             warnings.warn(
+                 f"Constructing {type(self).__name__} with scalar data is deprecated "
+                 "and will raise in a future version. Pass a sequence instead.",
+                 FutureWarning,
+                 stacklevel=find_stack_level(),
+             )
+             if sparse_index is None:
+                 npoints = 1
+             else:
+                 npoints = sparse_index.length
+
+             data = construct_1d_arraylike_from_scalar(data, npoints, dtype=None)
+             dtype = data.dtype
+
+         if dtype is not None:
+             dtype = pandas_dtype(dtype)
+
+         # TODO: disentangle the fill_value dtype inference from
+         # dtype inference
+         if data is None:
+             # TODO: What should the empty dtype be? Object or float?
+
+             # error: Argument "dtype" to "array" has incompatible type
+             # "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any],
+             # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
+             # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
+             data = np.array([], dtype=dtype)  # type: ignore[arg-type]
+
+         try:
+             data = sanitize_array(data, index=None)
+         except ValueError:
+             # NumPy may raise a ValueError on data like [1, []]
+             # we retry with object dtype here.
+             if dtype is None:
+                 dtype = np.dtype(object)
+                 data = np.atleast_1d(np.asarray(data, dtype=dtype))
+             else:
+                 raise
+
+         if copy:
+             # TODO: avoid double copy when dtype forces cast.
+             data = data.copy()
+
+         if fill_value is None:
+             fill_value_dtype = data.dtype if dtype is None else dtype
+             if fill_value_dtype is None:
+                 fill_value = np.nan
+             else:
+                 fill_value = na_value_for_dtype(fill_value_dtype)
+
+         if isinstance(data, type(self)) and sparse_index is None:
+             sparse_index = data._sparse_index
+             # error: Argument "dtype" to "asarray" has incompatible type
+             # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
+             sparse_values = np.asarray(
+                 data.sp_values, dtype=dtype  # type: ignore[arg-type]
+             )
+         elif sparse_index is None:
+             data = extract_array(data, extract_numpy=True)
+             if not isinstance(data, np.ndarray):
+                 # EA
+                 if isinstance(data.dtype, DatetimeTZDtype):
+                     warnings.warn(
+                         f"Creating SparseArray from {data.dtype} data "
+                         "loses timezone information. Cast to object before "
+                         "sparse to retain timezone information.",
+                         UserWarning,
+                         stacklevel=find_stack_level(),
+                     )
+                     data = np.asarray(data, dtype="datetime64[ns]")
+                     if fill_value is NaT:
+                         fill_value = np.datetime64("NaT", "ns")
+                 data = np.asarray(data)
+             sparse_values, sparse_index, fill_value = _make_sparse(
+                 # error: Argument "dtype" to "_make_sparse" has incompatible type
+                 # "Union[ExtensionDtype, dtype[Any], None]"; expected
+                 # "Optional[dtype[Any]]"
+                 data,
+                 kind=kind,
+                 fill_value=fill_value,
+                 dtype=dtype,  # type: ignore[arg-type]
+             )
+         else:
+             # error: Argument "dtype" to "asarray" has incompatible type
+             # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
+             sparse_values = np.asarray(data, dtype=dtype)  # type: ignore[arg-type]
+             if len(sparse_values) != sparse_index.npoints:
+                 raise AssertionError(
+                     f"Non array-like type {type(sparse_values)} must "
+                     "have the same length as the index"
+                 )
+         self._sparse_index = sparse_index
+         self._sparse_values = sparse_values
+         self._dtype = SparseDtype(sparse_values.dtype, fill_value)
+
+     @classmethod
+     def _simple_new(
+         cls,
+         sparse_array: np.ndarray,
+         sparse_index: SparseIndex,
+         dtype: SparseDtype,
+     ) -> Self:
+         new = object.__new__(cls)
+         new._sparse_index = sparse_index
+         new._sparse_values = sparse_array
+         new._dtype = dtype
+         return new
+
+     @classmethod
+     def from_spmatrix(cls, data: spmatrix) -> Self:
+         """
+         Create a SparseArray from a scipy.sparse matrix.
+
+         Parameters
+         ----------
+         data : scipy.sparse.sp_matrix
+             This should be a SciPy sparse matrix where the size
+             of the second dimension is 1. In other words, a
+             sparse matrix with a single column.
+
+         Returns
+         -------
+         SparseArray
+
+         Examples
+         --------
+         >>> import scipy.sparse
+         >>> mat = scipy.sparse.coo_matrix((4, 1))
+         >>> pd.arrays.SparseArray.from_spmatrix(mat)
+         [0.0, 0.0, 0.0, 0.0]
+         Fill: 0.0
+         IntIndex
+         Indices: array([], dtype=int32)
+         """
+         length, ncol = data.shape
+
+         if ncol != 1:
+             raise ValueError(f"'data' must have a single column, not '{ncol}'")
+
+         # our sparse index classes require that the positions be strictly
+         # increasing. So we need to sort loc, and arr accordingly.
+         data = data.tocsc()
+         data.sort_indices()
+         arr = data.data
+         idx = data.indices
+
+         zero = np.array(0, dtype=arr.dtype).item()
+         dtype = SparseDtype(arr.dtype, zero)
+         index = IntIndex(length, idx)
+
+         return cls._simple_new(arr, index, dtype)
+
+     def __array__(
+         self, dtype: NpDtype | None = None, copy: bool | None = None
+     ) -> np.ndarray:
+         fill_value = self.fill_value
+
+         if self.sp_index.ngaps == 0:
+             # Compat for na dtype and int values.
+             return self.sp_values
+         if dtype is None:
+             # Can NumPy represent this type?
+             # If not, `np.result_type` will raise. We catch that
+             # and return object.
+             if self.sp_values.dtype.kind == "M":
+                 # However, we *do* special-case the common case of
+                 # a datetime64 with pandas NaT.
+                 if fill_value is NaT:
+                     # Can't put pd.NaT in a datetime64[ns]
+                     fill_value = np.datetime64("NaT")
+             try:
+                 dtype = np.result_type(self.sp_values.dtype, type(fill_value))
+             except TypeError:
+                 dtype = object
+
+         out = np.full(self.shape, fill_value, dtype=dtype)
+         out[self.sp_index.indices] = self.sp_values
+         return out
+
+     def __setitem__(self, key, value) -> None:
+         # I suppose we could allow setting of non-fill_value elements.
+         # TODO(SparseArray.__setitem__): remove special cases in
+         # ExtensionBlock.where
+         msg = "SparseArray does not support item assignment via setitem"
+         raise TypeError(msg)
+
+     @classmethod
+     def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
+         return cls(scalars, dtype=dtype)
+
+     @classmethod
+     def _from_factorized(cls, values, original):
+         return cls(values, dtype=original.dtype)
+
+     # ------------------------------------------------------------------------
+     # Data
+     # ------------------------------------------------------------------------
+     @property
+     def sp_index(self) -> SparseIndex:
+         """
+         The SparseIndex containing the location of non- ``fill_value`` points.
+         """
+         return self._sparse_index
+
+     @property
+     def sp_values(self) -> np.ndarray:
+         """
+         An ndarray containing the non- ``fill_value`` values.
+
+         Examples
+         --------
+         >>> from pandas.arrays import SparseArray
+         >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
+         >>> s.sp_values
+         array([1, 2])
+         """
+         return self._sparse_values
+
+     @property
+     def dtype(self) -> SparseDtype:
+         return self._dtype
+
+     @property
+     def fill_value(self):
+         """
+         Elements in `data` that are `fill_value` are not stored.
+
+         For memory savings, this should be the most common value in the array.
+
+         Examples
+         --------
+         >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")
+         >>> ser.sparse.fill_value
+         0
+         >>> spa_dtype = pd.SparseDtype(dtype=np.int32, fill_value=2)
+         >>> ser = pd.Series([0, 0, 2, 2, 2], dtype=spa_dtype)
+         >>> ser.sparse.fill_value
+         2
+         """
+         return self.dtype.fill_value
+
+     @fill_value.setter
+     def fill_value(self, value) -> None:
+         self._dtype = SparseDtype(self.dtype.subtype, value)
+
+     @property
+     def kind(self) -> SparseIndexKind:
+         """
+         The kind of sparse index for this array. One of {'integer', 'block'}.
+         """
+         if isinstance(self.sp_index, IntIndex):
+             return "integer"
+         else:
+             return "block"
+
+     @property
+     def _valid_sp_values(self) -> np.ndarray:
+         sp_vals = self.sp_values
+         mask = notna(sp_vals)
+         return sp_vals[mask]
+
+     def __len__(self) -> int:
+         return self.sp_index.length
+
+     @property
+     def _null_fill_value(self) -> bool:
+         return self._dtype._is_na_fill_value
+
+     def _fill_value_matches(self, fill_value) -> bool:
+         if self._null_fill_value:
+             return isna(fill_value)
+         else:
+             return self.fill_value == fill_value
+
+     @property
+     def nbytes(self) -> int:
+         return self.sp_values.nbytes + self.sp_index.nbytes
+
+     @property
+     def density(self) -> float:
+         """
+         The percent of non- ``fill_value`` points, as decimal.
+
+         Examples
+         --------
+         >>> from pandas.arrays import SparseArray
+         >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
+         >>> s.density
+         0.6
+         """
+         return self.sp_index.npoints / self.sp_index.length
+
+     @property
+     def npoints(self) -> int:
+         """
+         The number of non- ``fill_value`` points.
+
+         Examples
+         --------
+         >>> from pandas.arrays import SparseArray
+         >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
+         >>> s.npoints
+         3
+         """
+         return self.sp_index.npoints
+
+     # error: Return type "SparseArray" of "isna" incompatible with return type
+     # "ndarray[Any, Any] | ExtensionArraySupportsAnyAll" in supertype "ExtensionArray"
+     def isna(self) -> Self:  # type: ignore[override]
+         # If null fill value, we want SparseDtype[bool, true]
+         # to preserve the same memory usage.
+         dtype = SparseDtype(bool, self._null_fill_value)
+         if self._null_fill_value:
+             return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype)
+         mask = np.full(len(self), False, dtype=np.bool_)
+         mask[self.sp_index.indices] = isna(self.sp_values)
+         return type(self)(mask, fill_value=False, dtype=dtype)
+
+     def _pad_or_backfill(  # pylint: disable=useless-parent-delegation
+         self,
+         *,
+         method: FillnaOptions,
+         limit: int | None = None,
+         limit_area: Literal["inside", "outside"] | None = None,
+         copy: bool = True,
+     ) -> Self:
+         # TODO(3.0): We can remove this method once deprecation for fillna method
+         # keyword is enforced.
+         return super()._pad_or_backfill(
+             method=method, limit=limit, limit_area=limit_area, copy=copy
+         )
+
+     def fillna(
+         self,
+         value=None,
+         method: FillnaOptions | None = None,
+         limit: int | None = None,
+         copy: bool = True,
+     ) -> Self:
+         """
+         Fill missing values with `value`.
+
+         Parameters
+         ----------
+         value : scalar, optional
+         method : str, optional
+
+             .. warning::
+
+                Using 'method' will result in high memory use,
+                as all `fill_value` methods will be converted to
+                an in-memory ndarray
+
+         limit : int, optional
+
+         copy : bool, default True
+             Ignored for SparseArray.
+
+         Returns
+         -------
+         SparseArray
+
+         Notes
+         -----
+         When `value` is specified, the result's ``fill_value`` depends on
+         ``self.fill_value``. The goal is to maintain low-memory use.
+
+         If ``self.fill_value`` is NA, the result dtype will be
+         ``SparseDtype(self.dtype, fill_value=value)``. This will preserve
+         the amount of memory used before and after filling.
+
+         When ``self.fill_value`` is not NA, the result dtype will be
+         ``self.dtype``. Again, this preserves the amount of memory used.
+         """
+         if (method is None and value is None) or (
+             method is not None and value is not None
+         ):
+             raise ValueError("Must specify one of 'method' or 'value'.")
+
+         if method is not None:
+             return super().fillna(method=method, limit=limit)
+
+         else:
+             new_values = np.where(isna(self.sp_values), value, self.sp_values)
+
+             if self._null_fill_value:
+                 # This is essentially just updating the dtype.
+                 new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
+             else:
+                 new_dtype = self.dtype
+
+         return self._simple_new(new_values, self._sparse_index, new_dtype)
+
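Editor's note: a short illustration of the fillna dtype rule described in the Notes above (illustrative, pandas 2.x semantics):

    import numpy as np
    import pandas as pd

    arr = pd.arrays.SparseArray([1.0, np.nan, 2.0])  # fill_value is nan (NA)
    filled = arr.fillna(0.0)
    # With an NA fill_value, only the dtype's fill_value changes; the stored
    # sp_values and memory use are preserved.
    print(filled.dtype)  # Sparse[float64, 0.0]
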
+     def shift(self, periods: int = 1, fill_value=None) -> Self:
+         if not len(self) or periods == 0:
+             return self.copy()
+
+         if isna(fill_value):
+             fill_value = self.dtype.na_value
+
+         subtype = np.result_type(fill_value, self.dtype.subtype)
+
+         if subtype != self.dtype.subtype:
+             # just coerce up front
+             arr = self.astype(SparseDtype(subtype, self.fill_value))
+         else:
+             arr = self
+
+         empty = self._from_sequence(
+             [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype
+         )
+
+         if periods > 0:
+             a = empty
+             b = arr[:-periods]
+         else:
+             a = arr[abs(periods) :]
+             b = empty
+         return arr._concat_same_type([a, b])
+
+     def _first_fill_value_loc(self):
+         """
+         Get the location of the first fill value.
+
+         Returns
+         -------
+         int
+         """
+         if len(self) == 0 or self.sp_index.npoints == len(self):
+             return -1
+
+         indices = self.sp_index.indices
+         if not len(indices) or indices[0] > 0:
+             return 0
+
+         # Append a value larger than 1 at the end, to cover the case where
+         # the fill value appears only in the tail of the array.
+         diff = np.r_[np.diff(indices), 2]
+         return indices[(diff > 1).argmax()] + 1
+
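Editor's note: a worked illustration of the gap-finding trick in _first_fill_value_loc (values are hypothetical, not part of the diff):

    import numpy as np

    # For SparseArray([9, 8, 0, 7], fill_value=0) the stored indices are [0, 1, 3].
    indices = np.array([0, 1, 3])
    diff = np.r_[np.diff(indices), 2]  # [1, 2, 2]; trailing 2 covers a fill-only tail
    print(indices[(diff > 1).argmax()] + 1)  # 2, the position of the first 0
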
+     @doc(ExtensionArray.duplicated)
+     def duplicated(
+         self, keep: Literal["first", "last", False] = "first"
+     ) -> npt.NDArray[np.bool_]:
+         values = np.asarray(self)
+         mask = np.asarray(self.isna())
+         return algos.duplicated(values, keep=keep, mask=mask)
+
+     def unique(self) -> Self:
+         uniques = algos.unique(self.sp_values)
+         if len(self.sp_values) != len(self):
+             fill_loc = self._first_fill_value_loc()
+             # In order to align with the behavior of pd.unique or
+             # pd.Series.unique, we should keep the original
+             # order; here we use unique again to find the
+             # insertion place. Since the length of sp_values
+             # is not large, the minor performance hit
+             # is worthwhile for correctness.
+             insert_loc = len(algos.unique(self.sp_values[:fill_loc]))
+             uniques = np.insert(uniques, insert_loc, self.fill_value)
+         return type(self)._from_sequence(uniques, dtype=self.dtype)
+
+     def _values_for_factorize(self):
+         # Still override this for hash_pandas_object
+         return np.asarray(self), self.fill_value
+
+     def factorize(
+         self,
+         use_na_sentinel: bool = True,
+     ) -> tuple[np.ndarray, SparseArray]:
+         # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
+         # The sparsity on this is backwards from what Sparse would want. Want
+         # ExtensionArray.factorize -> Tuple[EA, EA]
+         # Given that we have to return a dense array of codes, why bother
+         # implementing an efficient factorize?
+         codes, uniques = algos.factorize(
+             np.asarray(self), use_na_sentinel=use_na_sentinel
+         )
+         uniques_sp = SparseArray(uniques, dtype=self.dtype)
+         return codes, uniques_sp
+
+     def value_counts(self, dropna: bool = True) -> Series:
+         """
+         Returns a Series containing counts of unique values.
+
+         Parameters
+         ----------
+         dropna : bool, default True
+             Don't include counts of NaN, even if NaN is in sp_values.
+
+         Returns
+         -------
+         counts : Series
+         """
+         from pandas import (
+             Index,
+             Series,
+         )
+
+         keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna)
+         fcounts = self.sp_index.ngaps
+         if fcounts > 0 and (not self._null_fill_value or not dropna):
+             mask = isna(keys) if self._null_fill_value else keys == self.fill_value
+             if mask.any():
+                 counts[mask] += fcounts
+             else:
+                 # error: Argument 1 to "insert" has incompatible type "Union[
+                 # ExtensionArray,ndarray[Any, Any]]"; expected "Union[
+                 # _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype
+                 # [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]],
+                 # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence
+                 # [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]"
+                 keys = np.insert(keys, 0, self.fill_value)  # type: ignore[arg-type]
+                 counts = np.insert(counts, 0, fcounts)
+
+         if not isinstance(keys, ABCIndex):
+             index = Index(keys)
+         else:
+             index = keys
+         return Series(counts, index=index, copy=False)
+
+     # --------
+     # Indexing
+     # --------
+     @overload
+     def __getitem__(self, key: ScalarIndexer) -> Any:
+         ...
+
+     @overload
+     def __getitem__(
+         self,
+         key: SequenceIndexer | tuple[int | ellipsis, ...],
+     ) -> Self:
+         ...
+
+     def __getitem__(
+         self,
+         key: PositionalIndexer | tuple[int | ellipsis, ...],
+     ) -> Self | Any:
+         if isinstance(key, tuple):
+             key = unpack_tuple_and_ellipses(key)
+             if key is Ellipsis:
+                 raise ValueError("Cannot slice with Ellipsis")
+
+         if is_integer(key):
+             return self._get_val_at(key)
+         elif isinstance(key, tuple):
+             # error: Invalid index type "Tuple[Union[int, ellipsis], ...]"
+             # for "ndarray[Any, Any]"; expected type
+             # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_,
+             # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[
+             # Union[bool_, integer[Any]]]]], _NestedSequence[Union[
+             # bool, int]], Tuple[Union[SupportsIndex, _SupportsArray[
+             # dtype[Union[bool_, integer[Any]]]], _NestedSequence[
+             # _SupportsArray[dtype[Union[bool_, integer[Any]]]]],
+             # _NestedSequence[Union[bool, int]]], ...]]"
+             data_slice = self.to_dense()[key]  # type: ignore[index]
+         elif isinstance(key, slice):
+             # Avoid densifying when handling contiguous slices
+             if key.step is None or key.step == 1:
+                 start = 0 if key.start is None else key.start
+                 if start < 0:
+                     start += len(self)
+
+                 end = len(self) if key.stop is None else key.stop
+                 if end < 0:
+                     end += len(self)
+
+                 indices = self.sp_index.indices
+                 keep_inds = np.flatnonzero((indices >= start) & (indices < end))
+                 sp_vals = self.sp_values[keep_inds]
+
+                 sp_index = indices[keep_inds].copy()
+
+                 # If we've sliced to not include the start of the array, all our
+                 # indices should be shifted. NB: here we are careful to also not
+                 # shift by a negative value for a case like [0, 1][-100:] where
+                 # the start index should be treated like 0
+                 if start > 0:
+                     sp_index -= start
+
+                 # Length of our result should match applying this slice to a range
+                 # of the length of our original array
+                 new_len = len(range(len(self))[key])
+                 new_sp_index = make_sparse_index(new_len, sp_index, self.kind)
+                 return type(self)._simple_new(sp_vals, new_sp_index, self.dtype)
+             else:
+                 indices = np.arange(len(self), dtype=np.int32)[key]
+                 return self.take(indices)
+
+         elif not is_list_like(key):
+             # e.g. "foo" or 2.5
+             # exception message copied from numpy
+             raise IndexError(
+                 r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
+                 r"(`None`) and integer or boolean arrays are valid indices"
+             )
+
+         else:
+             if isinstance(key, SparseArray):
+                 # NOTE: If we guarantee that SparseDtype(bool)
+                 # has only fill_value - true, false or nan
+                 # (see GH PR 44955)
+                 # we can apply the mask very fast:
+                 if is_bool_dtype(key):
+                     if isna(key.fill_value):
+                         return self.take(key.sp_index.indices[key.sp_values])
+                     if not key.fill_value:
+                         return self.take(key.sp_index.indices)
+                     n = len(self)
+                     mask = np.full(n, True, dtype=np.bool_)
+                     mask[key.sp_index.indices] = False
+                     return self.take(np.arange(n)[mask])
+                 else:
+                     key = np.asarray(key)
+
+             key = check_array_indexer(self, key)
+
+             if com.is_bool_indexer(key):
+                 # mypy doesn't know we have an array here
+                 key = cast(np.ndarray, key)
+                 return self.take(np.arange(len(key), dtype=np.int32)[key])
+             elif hasattr(key, "__len__"):
+                 return self.take(key)
+             else:
+                 raise ValueError(f"Cannot slice with '{key}'")
+
+         return type(self)(data_slice, kind=self.kind)
+
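Editor's note: the contiguous-slice branch of __getitem__ above keeps the result sparse instead of densifying; a quick illustration (illustrative, pandas 2.x):

    import pandas as pd

    arr = pd.arrays.SparseArray([0, 0, 1, 2, 0])
    sliced = arr[1:4]
    # Stored indices within [1, 4) are kept and shifted by the slice start.
    print(sliced.sp_index.indices)  # [1 2]
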
+     def _get_val_at(self, loc):
+         loc = validate_insert_loc(loc, len(self))
+
+         sp_loc = self.sp_index.lookup(loc)
+         if sp_loc == -1:
+             return self.fill_value
+         else:
+             val = self.sp_values[sp_loc]
+             val = maybe_box_datetimelike(val, self.sp_values.dtype)
+             return val
+
+     def take(self, indices, *, allow_fill: bool = False, fill_value=None) -> Self:
+         if is_scalar(indices):
+             raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")
+         indices = np.asarray(indices, dtype=np.int32)
+
+         dtype = None
+         if indices.size == 0:
+             result = np.array([], dtype="object")
+             dtype = self.dtype
+         elif allow_fill:
+             result = self._take_with_fill(indices, fill_value=fill_value)
+         else:
+             return self._take_without_fill(indices)
+
+         return type(self)(
+             result, fill_value=self.fill_value, kind=self.kind, dtype=dtype
+         )
+
+     def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:
+         if fill_value is None:
+             fill_value = self.dtype.na_value
+
+         if indices.min() < -1:
+             raise ValueError(
+                 "Invalid value in 'indices'. Must be between -1 "
+                 "and the length of the array."
+             )
+
+         if indices.max() >= len(self):
+             raise IndexError("out of bounds value in 'indices'.")
+
+         if len(self) == 0:
+             # Empty... Allow taking only if all empty
+             if (indices == -1).all():
+                 dtype = np.result_type(self.sp_values, type(fill_value))
+                 taken = np.empty_like(indices, dtype=dtype)
+                 taken.fill(fill_value)
+                 return taken
+             else:
+                 raise IndexError("cannot do a non-empty take from an empty axes.")
+
+         # sp_indexer may be -1 for two reasons
+         # 1.) we took for an index of -1 (new)
+         # 2.) we took a value that was self.fill_value (old)
+         sp_indexer = self.sp_index.lookup_array(indices)
+         new_fill_indices = indices == -1
+         old_fill_indices = (sp_indexer == -1) & ~new_fill_indices
+
+         if self.sp_index.npoints == 0 and old_fill_indices.all():
+             # We've looked up all valid points on an all-sparse array.
+             taken = np.full(
+                 sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype
+             )
+
+         elif self.sp_index.npoints == 0:
+             # Use the old fill_value unless we took for an index of -1
+             _dtype = np.result_type(self.dtype.subtype, type(fill_value))
+             taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype)
+             taken[old_fill_indices] = self.fill_value
+         else:
+             taken = self.sp_values.take(sp_indexer)
+
+             # Fill in two steps.
+             # Old fill values
+             # New fill values
+             # potentially coercing to a new dtype at each stage.
+
+             m0 = sp_indexer[old_fill_indices] < 0
+             m1 = sp_indexer[new_fill_indices] < 0
+
+             result_type = taken.dtype
+
+             if m0.any():
+                 result_type = np.result_type(result_type, type(self.fill_value))
+                 taken = taken.astype(result_type)
+                 taken[old_fill_indices] = self.fill_value
+
+             if m1.any():
+                 result_type = np.result_type(result_type, type(fill_value))
+                 taken = taken.astype(result_type)
+                 taken[new_fill_indices] = fill_value
+
+         return taken
+
+     def _take_without_fill(self, indices) -> Self:
+         to_shift = indices < 0
+
+         n = len(self)
+
+         if (indices.max() >= n) or (indices.min() < -n):
+             if n == 0:
+                 raise IndexError("cannot do a non-empty take from an empty axes.")
+             raise IndexError("out of bounds value in 'indices'.")
+
+         if to_shift.any():
+             indices = indices.copy()
+             indices[to_shift] += n
+
+         sp_indexer = self.sp_index.lookup_array(indices)
+         value_mask = sp_indexer != -1
+         new_sp_values = self.sp_values[sp_indexer[value_mask]]
+
+         value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False)
+
+         new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind)
+         return type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype)
+
+     def searchsorted(
+         self,
+         v: ArrayLike | object,
+         side: Literal["left", "right"] = "left",
+         sorter: NumpySorter | None = None,
+     ) -> npt.NDArray[np.intp] | np.intp:
+         msg = "searchsorted requires high memory usage."
+         warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
+         v = np.asarray(v)
+         return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter)
+
+     def copy(self) -> Self:
+         values = self.sp_values.copy()
+         return self._simple_new(values, self.sp_index, self.dtype)
+
+     @classmethod
+     def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:
+         fill_value = to_concat[0].fill_value
+
+         values = []
+         length = 0
+
+         if to_concat:
+             sp_kind = to_concat[0].kind
+         else:
+             sp_kind = "integer"
+
+         sp_index: SparseIndex
+         if sp_kind == "integer":
+             indices = []
+
+             for arr in to_concat:
+                 int_idx = arr.sp_index.indices.copy()
+                 int_idx += length  # TODO: wraparound
+                 length += arr.sp_index.length
+
+                 values.append(arr.sp_values)
+                 indices.append(int_idx)
+
+             data = np.concatenate(values)
+             indices_arr = np.concatenate(indices)
+             # error: Argument 2 to "IntIndex" has incompatible type
+             # "ndarray[Any, dtype[signedinteger[_32Bit]]]";
+             # expected "Sequence[int]"
+             sp_index = IntIndex(length, indices_arr)  # type: ignore[arg-type]
+
+         else:
+             # when concatenating block indices, we don't claim that you'll
+             # get an identical index as concatenating the values and then
+             # creating a new index. We don't want to spend the time trying
+             # to merge blocks across arrays in `to_concat`, so the resulting
+             # BlockIndex may have more blocks.
+             blengths = []
+             blocs = []
+
+             for arr in to_concat:
+                 block_idx = arr.sp_index.to_block_index()
+
+                 values.append(arr.sp_values)
+                 blocs.append(block_idx.blocs.copy() + length)
+                 blengths.append(block_idx.blengths)
+                 length += arr.sp_index.length
+
+             data = np.concatenate(values)
+             blocs_arr = np.concatenate(blocs)
+             blengths_arr = np.concatenate(blengths)
+
+             sp_index = BlockIndex(length, blocs_arr, blengths_arr)
+
+         return cls(data, sparse_index=sp_index, fill_value=fill_value)
+
+     def astype(self, dtype: AstypeArg | None = None, copy: bool = True):
+         """
+         Change the dtype of a SparseArray.
+
+         The output will always be a SparseArray. To convert to a dense
+         ndarray with a certain dtype, use :meth:`numpy.asarray`.
+
+         Parameters
+         ----------
+         dtype : np.dtype or ExtensionDtype
+             For SparseDtype, this changes the dtype of
+             ``self.sp_values`` and the ``self.fill_value``.
+
+             For other dtypes, this only changes the dtype of
+             ``self.sp_values``.
+
+         copy : bool, default True
+             Whether to ensure a copy is made, even if not necessary.
+
+         Returns
+         -------
+         SparseArray
+
+         Examples
+         --------
+         >>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
+         >>> arr
+         [0, 0, 1, 2]
+         Fill: 0
+         IntIndex
+         Indices: array([2, 3], dtype=int32)
+
+         >>> arr.astype(SparseDtype(np.dtype('int32')))
+         [0, 0, 1, 2]
+         Fill: 0
+         IntIndex
+         Indices: array([2, 3], dtype=int32)
+
+         Using a NumPy dtype with a different kind (e.g. float) will coerce
+         just ``self.sp_values``.
+
+         >>> arr.astype(SparseDtype(np.dtype('float64')))
+         ... # doctest: +NORMALIZE_WHITESPACE
+         [nan, nan, 1.0, 2.0]
+         Fill: nan
+         IntIndex
+         Indices: array([2, 3], dtype=int32)
+
+         Using a SparseDtype, you can also change the fill value as well.
+
+         >>> arr.astype(SparseDtype("float64", fill_value=0.0))
+         ... # doctest: +NORMALIZE_WHITESPACE
+         [0.0, 0.0, 1.0, 2.0]
+         Fill: 0.0
+         IntIndex
+         Indices: array([2, 3], dtype=int32)
+         """
+         if dtype == self._dtype:
+             if not copy:
+                 return self
+             else:
+                 return self.copy()
+
+         future_dtype = pandas_dtype(dtype)
+         if not isinstance(future_dtype, SparseDtype):
+             # GH#34457
+             values = np.asarray(self)
+             values = ensure_wrapped_if_datetimelike(values)
+             return astype_array(values, dtype=future_dtype, copy=False)
+
+         dtype = self.dtype.update_dtype(dtype)
+         subtype = pandas_dtype(dtype._subtype_with_str)
+         subtype = cast(np.dtype, subtype)  # ensured by update_dtype
+         values = ensure_wrapped_if_datetimelike(self.sp_values)
+         sp_values = astype_array(values, subtype, copy=copy)
+         sp_values = np.asarray(sp_values)
+
+         return self._simple_new(sp_values, self.sp_index, dtype)
+
+     def map(self, mapper, na_action=None) -> Self:
+         """
+         Map categories using an input mapping or function.
+
+         Parameters
+         ----------
+         mapper : dict, Series, callable
+             The correspondence from old values to new.
+         na_action : {None, 'ignore'}, default None
+             If 'ignore', propagate NA values, without passing them to the
+             mapping correspondence.
+
+         Returns
+         -------
+         SparseArray
+             The output array will have the same density as the input.
+             The output fill value will be the result of applying the
+             mapping to ``self.fill_value``
+
+         Examples
+         --------
+         >>> arr = pd.arrays.SparseArray([0, 1, 2])
+         >>> arr.map(lambda x: x + 10)
+         [10, 11, 12]
+         Fill: 10
+         IntIndex
+         Indices: array([1, 2], dtype=int32)
+
+         >>> arr.map({0: 10, 1: 11, 2: 12})
+         [10, 11, 12]
+         Fill: 10
+         IntIndex
+         Indices: array([1, 2], dtype=int32)
+
+         >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
+         [10, 11, 12]
+         Fill: 10
+         IntIndex
+         Indices: array([1, 2], dtype=int32)
+         """
+         is_map = isinstance(mapper, (abc.Mapping, ABCSeries))
+
+         fill_val = self.fill_value
+
+         if na_action is None or notna(fill_val):
+             fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)
+
+         def func(sp_val):
+             new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)
+             # check identity and equality because nans are not equal to each other
+             if new_sp_val is fill_val or new_sp_val == fill_val:
+                 msg = "fill value in the sparse values not supported"
+                 raise ValueError(msg)
+             return new_sp_val
+
+         sp_values = [func(x) for x in self.sp_values]
+
+         return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)
+
+     def to_dense(self) -> np.ndarray:
+         """
+         Convert SparseArray to a NumPy array.
+
+         Returns
+         -------
+         arr : NumPy array
+         """
+         return np.asarray(self, dtype=self.sp_values.dtype)
+
+     def _where(self, mask, value):
+         # NB: may not preserve dtype, e.g. result may be Sparse[float64]
+         # while self is Sparse[int64]
+         naive_implementation = np.where(mask, self, value)
+         dtype = SparseDtype(naive_implementation.dtype, fill_value=self.fill_value)
+         result = type(self)._from_sequence(naive_implementation, dtype=dtype)
+         return result
+
+     # ------------------------------------------------------------------------
+     # IO
+     # ------------------------------------------------------------------------
+     def __setstate__(self, state) -> None:
+         """Necessary for making this object picklable"""
+         if isinstance(state, tuple):
+             # Compat for pandas < 0.24.0
+             nd_state, (fill_value, sp_index) = state
+             sparse_values = np.array([])
+             sparse_values.__setstate__(nd_state)
+
+             self._sparse_values = sparse_values
+             self._sparse_index = sp_index
+             self._dtype = SparseDtype(sparse_values.dtype, fill_value)
+         else:
+             self.__dict__.update(state)
+
+     def nonzero(self) -> tuple[npt.NDArray[np.int32]]:
+         if self.fill_value == 0:
+             return (self.sp_index.indices,)
+         else:
+             return (self.sp_index.indices[self.sp_values != 0],)
+
+     # ------------------------------------------------------------------------
+     # Reductions
+     # ------------------------------------------------------------------------
+
+     def _reduce(
+         self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
+     ):
+         method = getattr(self, name, None)
+
+         if method is None:
+             raise TypeError(f"cannot perform {name} with type {self.dtype}")
+
+         if skipna:
+             arr = self
+         else:
+             arr = self.dropna()
+
+         result = getattr(arr, name)(**kwargs)
+
+         if keepdims:
+             return type(self)([result], dtype=self.dtype)
+         else:
+             return result
+
+     def all(self, axis=None, *args, **kwargs):
+         """
+         Tests whether all elements evaluate True
+
+         Returns
+         -------
+         all : bool
+
+         See Also
+         --------
+         numpy.all
+         """
+         nv.validate_all(args, kwargs)
+
+         values = self.sp_values
+
+         if len(values) != len(self) and not np.all(self.fill_value):
+             return False
+
+         return values.all()
+
+     def any(self, axis: AxisInt = 0, *args, **kwargs) -> bool:
+         """
+         Tests whether at least one element evaluates True
+
+         Returns
+         -------
+         any : bool
+
+         See Also
+         --------
+         numpy.any
+         """
+         nv.validate_any(args, kwargs)
+
+         values = self.sp_values
+
+         if len(values) != len(self) and np.any(self.fill_value):
+             return True
+
+         return values.any().item()
+
+     def sum(
+         self,
+         axis: AxisInt = 0,
+         min_count: int = 0,
+         skipna: bool = True,
+         *args,
+         **kwargs,
+     ) -> Scalar:
+         """
+         Sum of non-NA/null values
+
+         Parameters
+         ----------
+         axis : int, default 0
+             Not Used. NumPy compatibility.
+         min_count : int, default 0
+             The required number of valid values to perform the summation. If fewer
+             than ``min_count`` valid values are present, the result will be the
+             missing value indicator for the subarray type.
+         *args, **kwargs
+             Not Used. NumPy compatibility.
+
+         Returns
+         -------
+         scalar
+         """
+         nv.validate_sum(args, kwargs)
+         valid_vals = self._valid_sp_values
+         sp_sum = valid_vals.sum()
+         has_na = self.sp_index.ngaps > 0 and not self._null_fill_value
+
+         if has_na and not skipna:
+             return na_value_for_dtype(self.dtype.subtype, compat=False)
+
+         if self._null_fill_value:
+             if check_below_min_count(valid_vals.shape, None, min_count):
+                 return na_value_for_dtype(self.dtype.subtype, compat=False)
+             return sp_sum
+         else:
+             nsparse = self.sp_index.ngaps
+             if check_below_min_count(valid_vals.shape, None, min_count - nsparse):
+                 return na_value_for_dtype(self.dtype.subtype, compat=False)
+             return sp_sum + self.fill_value * nsparse
+
+     def cumsum(self, axis: AxisInt = 0, *args, **kwargs) -> SparseArray:
+         """
+         Cumulative sum of non-NA/null values.
+
+         When performing the cumulative summation, any NA/null values will
+         be skipped. The resulting SparseArray will preserve the locations of
+         NaN values, but the fill value will be `np.nan` regardless.
+
+         Parameters
+         ----------
+         axis : int or None
+             Axis over which to perform the cumulative summation. If None,
+             perform cumulative summation over flattened array.
+
+         Returns
+         -------
+         cumsum : SparseArray
+         """
+         nv.validate_cumsum(args, kwargs)
+
+         if axis is not None and axis >= self.ndim:  # Mimic ndarray behaviour.
+             raise ValueError(f"axis(={axis}) out of bounds")
+
+         if not self._null_fill_value:
+             return SparseArray(self.to_dense()).cumsum()
+
+         return SparseArray(
+             self.sp_values.cumsum(),
+             sparse_index=self.sp_index,
+             fill_value=self.fill_value,
+         )
+
+     def mean(self, axis: Axis = 0, *args, **kwargs):
+         """
+         Mean of non-NA/null values
+
+         Returns
+         -------
+         mean : float
+         """
+         nv.validate_mean(args, kwargs)
+         valid_vals = self._valid_sp_values
+         sp_sum = valid_vals.sum()
+         ct = len(valid_vals)
+
+         if self._null_fill_value:
+             return sp_sum / ct
+         else:
+             nsparse = self.sp_index.ngaps
+             return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
+
+     def max(self, *, axis: AxisInt | None = None, skipna: bool = True):
+         """
+         Max of array values, ignoring NA values if specified.
+
+         Parameters
+         ----------
+         axis : int, default 0
+             Not Used. NumPy compatibility.
+         skipna : bool, default True
+             Whether to ignore NA values.
+
+         Returns
+         -------
+         scalar
+         """
+         nv.validate_minmax_axis(axis, self.ndim)
+         return self._min_max("max", skipna=skipna)
+
+     def min(self, *, axis: AxisInt | None = None, skipna: bool = True):
+         """
+         Min of array values, ignoring NA values if specified.
+
+         Parameters
+         ----------
+         axis : int, default 0
+             Not Used. NumPy compatibility.
+         skipna : bool, default True
+             Whether to ignore NA values.
+
+         Returns
+         -------
+         scalar
+         """
+         nv.validate_minmax_axis(axis, self.ndim)
+         return self._min_max("min", skipna=skipna)
+
+     def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar:
+         """
+         Min/max of non-NA/null values
+
+         Parameters
+         ----------
+         kind : {"min", "max"}
+         skipna : bool
+
+         Returns
+         -------
+         scalar
+         """
+         valid_vals = self._valid_sp_values
+         has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0
+
+         if len(valid_vals) > 0:
+             sp_min_max = getattr(valid_vals, kind)()
+
+             # If a non-null fill value is currently present, it might be the min/max
+             if has_nonnull_fill_vals:
+                 func = max if kind == "max" else min
+                 return func(sp_min_max, self.fill_value)
+             elif skipna:
+                 return sp_min_max
+             elif self.sp_index.ngaps == 0:
+                 # No NAs present
+                 return sp_min_max
+             else:
+                 return na_value_for_dtype(self.dtype.subtype, compat=False)
+         elif has_nonnull_fill_vals:
+             return self.fill_value
+         else:
+             return na_value_for_dtype(self.dtype.subtype, compat=False)
+
+     def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int:
+         values = self._sparse_values
+         index = self._sparse_index.indices
+         mask = np.asarray(isna(values))
+         func = np.argmax if kind == "argmax" else np.argmin
+
+         idx = np.arange(values.shape[0])
+         non_nans = values[~mask]
+         non_nan_idx = idx[~mask]
+
+         _candidate = non_nan_idx[func(non_nans)]
+         candidate = index[_candidate]
+
+         if isna(self.fill_value):
+             return candidate
+         if kind == "argmin" and self[candidate] < self.fill_value:
+             return candidate
+         if kind == "argmax" and self[candidate] > self.fill_value:
+             return candidate
+         _loc = self._first_fill_value_loc()
+         if _loc == -1:
+             # fill_value doesn't exist
+             return candidate
+         else:
+             return _loc
+
+     def argmax(self, skipna: bool = True) -> int:
+         validate_bool_kwarg(skipna, "skipna")
+         if not skipna and self._hasna:
+             raise NotImplementedError
+         return self._argmin_argmax("argmax")
+
+     def argmin(self, skipna: bool = True) -> int:
1665
+ validate_bool_kwarg(skipna, "skipna")
1666
+ if not skipna and self._hasna:
1667
+ raise NotImplementedError
1668
+ return self._argmin_argmax("argmin")
1669
+
1670
+ # ------------------------------------------------------------------------
1671
+ # Ufuncs
1672
+ # ------------------------------------------------------------------------
1673
+
1674
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
1675
+
1676
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
1677
+ out = kwargs.get("out", ())
1678
+
1679
+ for x in inputs + out:
1680
+ if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
1681
+ return NotImplemented
1682
+
1683
+ # for binary ops, use our custom dunder methods
1684
+ result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
1685
+ self, ufunc, method, *inputs, **kwargs
1686
+ )
1687
+ if result is not NotImplemented:
1688
+ return result
1689
+
1690
+ if "out" in kwargs:
1691
+ # e.g. tests.arrays.sparse.test_arithmetics.test_ndarray_inplace
1692
+ res = arraylike.dispatch_ufunc_with_out(
1693
+ self, ufunc, method, *inputs, **kwargs
1694
+ )
1695
+ return res
1696
+
1697
+ if method == "reduce":
1698
+ result = arraylike.dispatch_reduction_ufunc(
1699
+ self, ufunc, method, *inputs, **kwargs
1700
+ )
1701
+ if result is not NotImplemented:
1702
+ # e.g. tests.series.test_ufunc.TestNumpyReductions
1703
+ return result
1704
+
1705
+ if len(inputs) == 1:
1706
+ # No alignment necessary.
1707
+ sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
1708
+ fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)
1709
+
1710
+ if ufunc.nout > 1:
1711
+ # multiple outputs. e.g. modf
1712
+ arrays = tuple(
1713
+ self._simple_new(
1714
+ sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)
1715
+ )
1716
+ for sp_value, fv in zip(sp_values, fill_value)
1717
+ )
1718
+ return arrays
1719
+ elif method == "reduce":
1720
+ # e.g. reductions
1721
+ return sp_values
1722
+
1723
+ return self._simple_new(
1724
+ sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
1725
+ )
1726
+
1727
+ new_inputs = tuple(np.asarray(x) for x in inputs)
1728
+ result = getattr(ufunc, method)(*new_inputs, **kwargs)
1729
+ if out:
1730
+ if len(out) == 1:
1731
+ out = out[0]
1732
+ return out
1733
+
1734
+ if ufunc.nout > 1:
1735
+ return tuple(type(self)(x) for x in result)
1736
+ elif method == "at":
1737
+ # no return value
1738
+ return None
1739
+ else:
1740
+ return type(self)(result)
1741
+
1742
+ # ------------------------------------------------------------------------
1743
+ # Ops
1744
+ # ------------------------------------------------------------------------
1745
+
1746
+ def _arith_method(self, other, op):
1747
+ op_name = op.__name__
1748
+
1749
+ if isinstance(other, SparseArray):
1750
+ return _sparse_array_op(self, other, op, op_name)
1751
+
1752
+ elif is_scalar(other):
1753
+ with np.errstate(all="ignore"):
1754
+ fill = op(_get_fill(self), np.asarray(other))
1755
+ result = op(self.sp_values, other)
1756
+
1757
+ if op_name == "divmod":
1758
+ left, right = result
1759
+ lfill, rfill = fill
1760
+ return (
1761
+ _wrap_result(op_name, left, self.sp_index, lfill),
1762
+ _wrap_result(op_name, right, self.sp_index, rfill),
1763
+ )
1764
+
1765
+ return _wrap_result(op_name, result, self.sp_index, fill)
1766
+
1767
+ else:
1768
+ other = np.asarray(other)
1769
+ with np.errstate(all="ignore"):
1770
+ if len(self) != len(other):
1771
+ raise AssertionError(
1772
+ f"length mismatch: {len(self)} vs. {len(other)}"
1773
+ )
1774
+ if not isinstance(other, SparseArray):
1775
+ dtype = getattr(other, "dtype", None)
1776
+ other = SparseArray(other, fill_value=self.fill_value, dtype=dtype)
1777
+ return _sparse_array_op(self, other, op, op_name)
1778
+
1779
+ def _cmp_method(self, other, op) -> SparseArray:
1780
+ if not is_scalar(other) and not isinstance(other, type(self)):
1781
+ # convert list-like to ndarray
1782
+ other = np.asarray(other)
1783
+
1784
+ if isinstance(other, np.ndarray):
1785
+ # TODO: make this more flexible than just ndarray...
1786
+ other = SparseArray(other, fill_value=self.fill_value)
1787
+
1788
+ if isinstance(other, SparseArray):
1789
+ if len(self) != len(other):
1790
+ raise ValueError(
1791
+ f"operands have mismatched length {len(self)} and {len(other)}"
1792
+ )
1793
+
1794
+ op_name = op.__name__.strip("_")
1795
+ return _sparse_array_op(self, other, op, op_name)
1796
+ else:
1797
+ # scalar
1798
+ fill_value = op(self.fill_value, other)
1799
+ result = np.full(len(self), fill_value, dtype=np.bool_)
1800
+ result[self.sp_index.indices] = op(self.sp_values, other)
1801
+
1802
+ return type(self)(
1803
+ result,
1804
+ fill_value=fill_value,
1805
+ dtype=np.bool_,
1806
+ )
1807
+
1808
+ _logical_method = _cmp_method
1809
+
1810
+ def _unary_method(self, op) -> SparseArray:
1811
+ fill_value = op(np.array(self.fill_value)).item()
1812
+ dtype = SparseDtype(self.dtype.subtype, fill_value)
1813
+ # NOTE: if fill_value doesn't change
1814
+ # we just have to apply op to sp_values
1815
+ if isna(self.fill_value) or fill_value == self.fill_value:
1816
+ values = op(self.sp_values)
1817
+ return type(self)._simple_new(values, self.sp_index, self.dtype)
1818
+ # In the other case we have to recalc indexes
1819
+ return type(self)(op(self.to_dense()), dtype=dtype)
1820
+
1821
+ def __pos__(self) -> SparseArray:
1822
+ return self._unary_method(operator.pos)
1823
+
1824
+ def __neg__(self) -> SparseArray:
1825
+ return self._unary_method(operator.neg)
1826
+
1827
+ def __invert__(self) -> SparseArray:
1828
+ return self._unary_method(operator.invert)
1829
+
1830
+ def __abs__(self) -> SparseArray:
1831
+ return self._unary_method(operator.abs)
1832
+
1833
+ # ----------
1834
+ # Formatting
1835
+ # -----------
1836
+ def __repr__(self) -> str:
1837
+ pp_str = printing.pprint_thing(self)
1838
+ pp_fill = printing.pprint_thing(self.fill_value)
1839
+ pp_index = printing.pprint_thing(self.sp_index)
1840
+ return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"
1841
+
1842
+ def _formatter(self, boxed: bool = False):
1843
+ # Defer to the formatter from the GenericArrayFormatter calling us.
1844
+ # This will infer the correct formatter from the dtype of the values.
1845
+ return None
1846
+
1847
+
1848
+ def _make_sparse(
1849
+ arr: np.ndarray,
1850
+ kind: SparseIndexKind = "block",
1851
+ fill_value=None,
1852
+ dtype: np.dtype | None = None,
1853
+ ):
1854
+ """
1855
+ Convert ndarray to sparse format
1856
+
1857
+ Parameters
1858
+ ----------
1859
+ arr : ndarray
1860
+ kind : {'block', 'integer'}
1861
+ fill_value : NaN or another value
1862
+ dtype : np.dtype, optional
1863
+ copy : bool, default False
1864
+
1865
+ Returns
1866
+ -------
1867
+ (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
1868
+ """
1869
+ assert isinstance(arr, np.ndarray)
1870
+
1871
+ if arr.ndim > 1:
1872
+ raise TypeError("expected dimension <= 1 data")
1873
+
1874
+ if fill_value is None:
1875
+ fill_value = na_value_for_dtype(arr.dtype)
1876
+
1877
+ if isna(fill_value):
1878
+ mask = notna(arr)
1879
+ else:
1880
+ # cast to object comparison to be safe
1881
+ if is_string_dtype(arr.dtype):
1882
+ arr = arr.astype(object)
1883
+
1884
+ if is_object_dtype(arr.dtype):
1885
+ # element-wise equality check method in numpy doesn't treat
1886
+ # each element type, eg. 0, 0.0, and False are treated as
1887
+ # same. So we have to check the both of its type and value.
1888
+ mask = splib.make_mask_object_ndarray(arr, fill_value)
1889
+ else:
1890
+ mask = arr != fill_value
1891
+
1892
+ length = len(arr)
1893
+ if length != len(mask):
1894
+ # the arr is a SparseArray
1895
+ indices = mask.sp_index.indices
1896
+ else:
1897
+ indices = mask.nonzero()[0].astype(np.int32)
1898
+
1899
+ index = make_sparse_index(length, indices, kind)
1900
+ sparsified_values = arr[mask]
1901
+ if dtype is not None:
1902
+ sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values)
1903
+ sparsified_values = astype_array(sparsified_values, dtype=dtype)
1904
+ sparsified_values = np.asarray(sparsified_values)
1905
+
1906
+ # TODO: copy
1907
+ return sparsified_values, index, fill_value
1908
+
1909
+
1910
+ @overload
1911
+ def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex:
1912
+ ...
1913
+
1914
+
1915
+ @overload
1916
+ def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex:
1917
+ ...
1918
+
1919
+
1920
+ def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex:
1921
+ index: SparseIndex
1922
+ if kind == "block":
1923
+ locs, lens = splib.get_blocks(indices)
1924
+ index = BlockIndex(length, locs, lens)
1925
+ elif kind == "integer":
1926
+ index = IntIndex(length, indices)
1927
+ else: # pragma: no cover
1928
+ raise ValueError("must be block or integer type")
1929
+ return index
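Note on the reductions above: `sp_values` holds the explicitly stored entries while the `sp_index.ngaps` gap positions implicitly hold `fill_value`, so `sum`/`mean` fold `fill_value * ngaps` back in, and `_min_max` compares the sparse extremum against the fill value. A minimal sketch of that behaviour (the array values here are illustrative, not part of this commit):

    import numpy as np
    from pandas.arrays import SparseArray

    # Null fill value: the gaps count as missing, so only sp_values reduce.
    arr = SparseArray([1.0, np.nan, 2.0, np.nan, 3.0])
    print(arr.sum())      # 6.0

    # Non-null fill value: the gaps are real data, folded back in as
    # fill_value * ngaps (see SparseArray.sum/mean above).
    dense = SparseArray([1, 0, 0, 2], fill_value=0)
    print(dense.sum())    # 3
    print(dense.mean())   # 0.75 == (1 + 2 + 0 * 2) / 4
    print(dense.min())    # 0 -- _min_max compares sp_values.min() with fill_value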
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc ADDED
Binary file (1.28 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc ADDED
Binary file (6.71 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc ADDED
Binary file (18.5 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc ADDED
Binary file (39 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc ADDED
Binary file (42 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc ADDED
Binary file (10.6 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc ADDED
Binary file (62.6 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc ADDED
Binary file (3.22 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc ADDED
Binary file (9.55 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc ADDED
Binary file (19.5 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/api.py ADDED
@@ -0,0 +1,85 @@
+ from pandas.core.dtypes.common import (
+     is_any_real_numeric_dtype,
+     is_array_like,
+     is_bool,
+     is_bool_dtype,
+     is_categorical_dtype,
+     is_complex,
+     is_complex_dtype,
+     is_datetime64_any_dtype,
+     is_datetime64_dtype,
+     is_datetime64_ns_dtype,
+     is_datetime64tz_dtype,
+     is_dict_like,
+     is_dtype_equal,
+     is_extension_array_dtype,
+     is_file_like,
+     is_float,
+     is_float_dtype,
+     is_hashable,
+     is_int64_dtype,
+     is_integer,
+     is_integer_dtype,
+     is_interval,
+     is_interval_dtype,
+     is_iterator,
+     is_list_like,
+     is_named_tuple,
+     is_number,
+     is_numeric_dtype,
+     is_object_dtype,
+     is_period_dtype,
+     is_re,
+     is_re_compilable,
+     is_scalar,
+     is_signed_integer_dtype,
+     is_sparse,
+     is_string_dtype,
+     is_timedelta64_dtype,
+     is_timedelta64_ns_dtype,
+     is_unsigned_integer_dtype,
+     pandas_dtype,
+ )
+
+ __all__ = [
+     "is_any_real_numeric_dtype",
+     "is_array_like",
+     "is_bool",
+     "is_bool_dtype",
+     "is_categorical_dtype",
+     "is_complex",
+     "is_complex_dtype",
+     "is_datetime64_any_dtype",
+     "is_datetime64_dtype",
+     "is_datetime64_ns_dtype",
+     "is_datetime64tz_dtype",
+     "is_dict_like",
+     "is_dtype_equal",
+     "is_extension_array_dtype",
+     "is_file_like",
+     "is_float",
+     "is_float_dtype",
+     "is_hashable",
+     "is_int64_dtype",
+     "is_integer",
+     "is_integer_dtype",
+     "is_interval",
+     "is_interval_dtype",
+     "is_iterator",
+     "is_list_like",
+     "is_named_tuple",
+     "is_number",
+     "is_numeric_dtype",
+     "is_object_dtype",
+     "is_period_dtype",
+     "is_re",
+     "is_re_compilable",
+     "is_scalar",
+     "is_signed_integer_dtype",
+     "is_sparse",
+     "is_string_dtype",
+     "is_timedelta64_dtype",
+     "is_timedelta64_ns_dtype",
+     "is_unsigned_integer_dtype",
+     "pandas_dtype",
+ ]
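This module is a pure re-export shim; the same names are reachable through the public ``pandas.api.types`` namespace, which is the supported import path. A quick illustration:

    import pandas as pd
    from pandas.api.types import is_integer_dtype, is_string_dtype, pandas_dtype

    s = pd.Series([1, 2, 3])
    print(is_integer_dtype(s.dtype))   # True
    print(is_string_dtype(s.dtype))    # False
    print(pandas_dtype("int64"))       # int64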
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/astype.py ADDED
@@ -0,0 +1,301 @@
+ """
+ Functions for implementing 'astype' methods according to pandas conventions,
+ particularly ones that differ from numpy.
+ """
+ from __future__ import annotations
+
+ import inspect
+ from typing import (
+     TYPE_CHECKING,
+     overload,
+ )
+ import warnings
+
+ import numpy as np
+
+ from pandas._libs import lib
+ from pandas._libs.tslibs.timedeltas import array_to_timedelta64
+ from pandas.errors import IntCastingNaNError
+
+ from pandas.core.dtypes.common import (
+     is_object_dtype,
+     is_string_dtype,
+     pandas_dtype,
+ )
+ from pandas.core.dtypes.dtypes import (
+     ExtensionDtype,
+     NumpyEADtype,
+ )
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         ArrayLike,
+         DtypeObj,
+         IgnoreRaise,
+     )
+
+     from pandas.core.arrays import ExtensionArray
+
+ _dtype_obj = np.dtype(object)
+
+
+ @overload
+ def _astype_nansafe(
+     arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...
+ ) -> np.ndarray:
+     ...
+
+
+ @overload
+ def _astype_nansafe(
+     arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...
+ ) -> ExtensionArray:
+     ...
+
+
+ def _astype_nansafe(
+     arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False
+ ) -> ArrayLike:
+     """
+     Cast the elements of an array to a given dtype in a nan-safe manner.
+
+     Parameters
+     ----------
+     arr : ndarray
+     dtype : np.dtype or ExtensionDtype
+     copy : bool, default True
+         If False, a view will be attempted but may fail if,
+         e.g., the item sizes don't align.
+     skipna : bool, default False
+         Whether or not we should skip NaN when casting as a string-type.
+
+     Raises
+     ------
+     ValueError
+         The dtype was a datetime64/timedelta64 dtype, but it had no unit.
+     """
+
+     # dispatch on extension dtype if needed
+     if isinstance(dtype, ExtensionDtype):
+         return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
+
+     elif not isinstance(dtype, np.dtype):  # pragma: no cover
+         raise ValueError("dtype must be np.dtype or ExtensionDtype")
+
+     if arr.dtype.kind in "mM":
+         from pandas.core.construction import ensure_wrapped_if_datetimelike
+
+         arr = ensure_wrapped_if_datetimelike(arr)
+         res = arr.astype(dtype, copy=copy)
+         return np.asarray(res)
+
+     if issubclass(dtype.type, str):
+         shape = arr.shape
+         if arr.ndim > 1:
+             arr = arr.ravel()
+         return lib.ensure_string_array(
+             arr, skipna=skipna, convert_na_value=False
+         ).reshape(shape)
+
+     elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu":
+         return _astype_float_to_int_nansafe(arr, dtype, copy)
+
+     elif arr.dtype == object:
+         # if we have a datetime/timedelta array of objects
+         # then coerce to datetime64[ns] and use DatetimeArray.astype
+
+         if lib.is_np_dtype(dtype, "M"):
+             from pandas.core.arrays import DatetimeArray
+
+             dta = DatetimeArray._from_sequence(arr, dtype=dtype)
+             return dta._ndarray
+
+         elif lib.is_np_dtype(dtype, "m"):
+             from pandas.core.construction import ensure_wrapped_if_datetimelike
+
+             # bc we know arr.dtype == object, this is equivalent to
+             # `np.asarray(to_timedelta(arr))`, but using a lower-level API that
+             # does not require a circular import.
+             tdvals = array_to_timedelta64(arr).view("m8[ns]")
+
+             tda = ensure_wrapped_if_datetimelike(tdvals)
+             return tda.astype(dtype, copy=False)._ndarray
+
+     if dtype.name in ("datetime64", "timedelta64"):
+         msg = (
+             f"The '{dtype.name}' dtype has no unit. Please pass in "
+             f"'{dtype.name}[ns]' instead."
+         )
+         raise ValueError(msg)
+
+     if copy or arr.dtype == object or dtype == object:
+         # Explicit copy, or required since NumPy can't view from / to object.
+         return arr.astype(dtype, copy=True)
+
+     return arr.astype(dtype, copy=copy)
+
+
+ def _astype_float_to_int_nansafe(
+     values: np.ndarray, dtype: np.dtype, copy: bool
+ ) -> np.ndarray:
+     """
+     astype with a check preventing converting NaN to a meaningless integer value.
+     """
+     if not np.isfinite(values).all():
+         raise IntCastingNaNError(
+             "Cannot convert non-finite values (NA or inf) to integer"
+         )
+     if dtype.kind == "u":
+         # GH#45151
+         if not (values >= 0).all():
+             raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}")
+     with warnings.catch_warnings():
+         warnings.filterwarnings("ignore", category=RuntimeWarning)
+         return values.astype(dtype, copy=copy)
+
+
+ def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike:
+     """
+     Cast array (ndarray or ExtensionArray) to the new dtype.
+
+     Parameters
+     ----------
+     values : ndarray or ExtensionArray
+     dtype : dtype object
+     copy : bool, default False
+         copy if indicated
+
+     Returns
+     -------
+     ndarray or ExtensionArray
+     """
+     if values.dtype == dtype:
+         if copy:
+             return values.copy()
+         return values
+
+     if not isinstance(values, np.ndarray):
+         # i.e. ExtensionArray
+         values = values.astype(dtype, copy=copy)
+
+     else:
+         values = _astype_nansafe(values, dtype, copy=copy)
+
+     # in pandas we don't store numpy str dtypes, so convert to object
+     if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str):
+         values = np.array(values, dtype=object)
+
+     return values
+
+
+ def astype_array_safe(
+     values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise"
+ ) -> ArrayLike:
+     """
+     Cast array (ndarray or ExtensionArray) to the new dtype.
+
+     This basically is the implementation for DataFrame/Series.astype and
+     includes all custom logic for pandas (NaN-safety, converting str to
+     object, etc.).
+
+     Parameters
+     ----------
+     values : ndarray or ExtensionArray
+     dtype : str, dtype convertible
+     copy : bool, default False
+         copy if indicated
+     errors : str, {'raise', 'ignore'}, default 'raise'
+         - ``raise`` : allow exceptions to be raised
+         - ``ignore`` : suppress exceptions. On error return original object
+
+     Returns
+     -------
+     ndarray or ExtensionArray
+     """
+     errors_legal_values = ("raise", "ignore")
+
+     if errors not in errors_legal_values:
+         invalid_arg = (
+             "Expected value of kwarg 'errors' to be one of "
+             f"{list(errors_legal_values)}. Supplied value is '{errors}'"
+         )
+         raise ValueError(invalid_arg)
+
+     if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):
+         msg = (
+             f"Expected an instance of {dtype.__name__}, "
+             "but got the class instead. Try instantiating 'dtype'."
+         )
+         raise TypeError(msg)
+
+     dtype = pandas_dtype(dtype)
+     if isinstance(dtype, NumpyEADtype):
+         # Ensure we don't end up with a NumpyExtensionArray
+         dtype = dtype.numpy_dtype
+
+     try:
+         new_values = astype_array(values, dtype, copy=copy)
+     except (ValueError, TypeError):
+         # e.g. _astype_nansafe can fail on object-dtype of strings
+         # trying to convert to float
+         if errors == "ignore":
+             new_values = values
+         else:
+             raise
+
+     return new_values
+
+
+ def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool:
+     """Checks if astype avoided copying the data.
+
+     Parameters
+     ----------
+     dtype : Original dtype
+     new_dtype : target dtype
+
+     Returns
+     -------
+     True if new data is a view or not guaranteed to be a copy, False otherwise
+     """
+     if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype):
+         new_dtype, dtype = dtype, new_dtype
+
+     if dtype == new_dtype:
+         return True
+
+     elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype):
+         # Only equal numpy dtypes avoid a copy
+         return False
+
+     elif is_string_dtype(dtype) and is_string_dtype(new_dtype):
+         # Potentially! a view when converting from object to string
+         return True
+
+     elif is_object_dtype(dtype) and new_dtype.kind == "O":
+         # When the underlying array has dtype object, we don't have to make a copy
+         return True
+
+     elif dtype.kind in "mM" and new_dtype.kind in "mM":
+         dtype = getattr(dtype, "numpy_dtype", dtype)
+         new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype)
+         return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None)
+
+     numpy_dtype = getattr(dtype, "numpy_dtype", None)
+     new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None)
+
+     if numpy_dtype is None and isinstance(dtype, np.dtype):
+         numpy_dtype = dtype
+
+     if new_numpy_dtype is None and isinstance(new_dtype, np.dtype):
+         new_numpy_dtype = new_dtype
+
+     if numpy_dtype is not None and new_numpy_dtype is not None:
+         # if both have NumPy dtype or one of them is a numpy dtype
+         # they are only a view when the numpy dtypes are equal, e.g.
+         # int64 -> Int64 or int64[pyarrow]
+         # int64 -> Int32 copies
+         return numpy_dtype == new_numpy_dtype
+
+     # Assume this is a view since we don't know for sure if a copy was made
+     return True
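`_astype_nansafe` and `_astype_float_to_int_nansafe` are private, but they back the public `Series.astype`, so their guards are easy to exercise from there. A short sketch of the two error paths in the file above (values are illustrative):

    import numpy as np
    import pandas as pd
    from pandas.errors import IntCastingNaNError

    s = pd.Series([1.0, np.nan])
    try:
        s.astype(np.int64)
    except IntCastingNaNError as exc:
        print(exc)   # Cannot convert non-finite values (NA or inf) to integer

    try:
        pd.Series([-1.0, 2.0]).astype(np.uint8)   # GH#45151 unsigned guard
    except ValueError as exc:
        print(exc)   # Cannot losslessly cast from float64 to uint8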
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/base.py ADDED
@@ -0,0 +1,583 @@
+ """
+ Extend pandas with custom array types.
+ """
+ from __future__ import annotations
+
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     TypeVar,
+     cast,
+     overload,
+ )
+
+ import numpy as np
+
+ from pandas._libs import missing as libmissing
+ from pandas._libs.hashtable import object_hash
+ from pandas._libs.properties import cache_readonly
+ from pandas.errors import AbstractMethodError
+
+ from pandas.core.dtypes.generic import (
+     ABCDataFrame,
+     ABCIndex,
+     ABCSeries,
+ )
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         DtypeObj,
+         Self,
+         Shape,
+         npt,
+         type_t,
+     )
+
+     from pandas import Index
+     from pandas.core.arrays import ExtensionArray
+
+     # To parameterize on same ExtensionDtype
+     ExtensionDtypeT = TypeVar("ExtensionDtypeT", bound="ExtensionDtype")
+
+
+ class ExtensionDtype:
+     """
+     A custom data type, to be paired with an ExtensionArray.
+
+     See Also
+     --------
+     extensions.register_extension_dtype: Register an ExtensionType
+         with pandas as class decorator.
+     extensions.ExtensionArray: Abstract base class for custom 1-D array types.
+
+     Notes
+     -----
+     The interface includes the following abstract methods that must
+     be implemented by subclasses:
+
+     * type
+     * name
+     * construct_array_type
+
+     The following attributes and methods influence the behavior of the dtype in
+     pandas operations
+
+     * _is_numeric
+     * _is_boolean
+     * _get_common_dtype
+
+     The `na_value` class attribute can be used to set the default NA value
+     for this type. :attr:`numpy.nan` is used by default.
+
+     ExtensionDtypes are required to be hashable. The base class provides
+     a default implementation, which relies on the ``_metadata`` class
+     attribute. ``_metadata`` should be a tuple containing the strings
+     that define your data type. For example, with ``PeriodDtype`` that's
+     the ``freq`` attribute.
+
+     **If you have a parametrized dtype you should set the ``_metadata``
+     class property**.
+
+     Ideally, the attributes in ``_metadata`` will match the
+     parameters to your ``ExtensionDtype.__init__`` (if any). If any of
+     the attributes in ``_metadata`` don't implement the standard
+     ``__eq__`` or ``__hash__``, the default implementations here will not
+     work.
+
+     Examples
+     --------
+
+     For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
+     can be implemented: this method receives a pyarrow Array or ChunkedArray
+     as only argument and is expected to return the appropriate pandas
+     ExtensionArray for this dtype and the passed values:
+
+     >>> import pyarrow
+     >>> from pandas.api.extensions import ExtensionArray
+     >>> class ExtensionDtype:
+     ...     def __from_arrow__(
+     ...         self,
+     ...         array: pyarrow.Array | pyarrow.ChunkedArray
+     ...     ) -> ExtensionArray:
+     ...         ...
+
+     This class does not inherit from 'abc.ABCMeta' for performance reasons.
+     Methods and properties required by the interface raise
+     ``pandas.errors.AbstractMethodError`` and no ``register`` method is
+     provided for registering virtual subclasses.
+     """
+
+     _metadata: tuple[str, ...] = ()
+
+     def __str__(self) -> str:
+         return self.name
+
+     def __eq__(self, other: object) -> bool:
+         """
+         Check whether 'other' is equal to self.
+
+         By default, 'other' is considered equal if either
+
+         * it's a string matching 'self.name'.
+         * it's an instance of this type and all of the attributes
+           in ``self._metadata`` are equal between `self` and `other`.
+
+         Parameters
+         ----------
+         other : Any
+
+         Returns
+         -------
+         bool
+         """
+         if isinstance(other, str):
+             try:
+                 other = self.construct_from_string(other)
+             except TypeError:
+                 return False
+         if isinstance(other, type(self)):
+             return all(
+                 getattr(self, attr) == getattr(other, attr) for attr in self._metadata
+             )
+         return False
+
+     def __hash__(self) -> int:
+         # for python>=3.10, different nan objects have different hashes
+         # we need to avoid that and thus use hash function with old behavior
+         return object_hash(tuple(getattr(self, attr) for attr in self._metadata))
+
+     def __ne__(self, other: object) -> bool:
+         return not self.__eq__(other)
+
+     @property
+     def na_value(self) -> object:
+         """
+         Default NA value to use for this type.
+
+         This is used in e.g. ExtensionArray.take. This should be the
+         user-facing "boxed" version of the NA value, not the physical NA value
+         for storage. e.g. for JSONArray, this is an empty dictionary.
+         """
+         return np.nan
+
+     @property
+     def type(self) -> type_t[Any]:
+         """
+         The scalar type for the array, e.g. ``int``
+
+         It's expected ``ExtensionArray[item]`` returns an instance
+         of ``ExtensionDtype.type`` for scalar ``item``, assuming
+         that value is valid (not NA). NA values do not need to be
+         instances of `type`.
+         """
+         raise AbstractMethodError(self)
+
+     @property
+     def kind(self) -> str:
+         """
+         A character code (one of 'biufcmMOSUV'), default 'O'
+
+         This should match the NumPy dtype used when the array is
+         converted to an ndarray, which is probably 'O' for object if
+         the extension type cannot be represented as a built-in NumPy
+         type.
+
+         See Also
+         --------
+         numpy.dtype.kind
+         """
+         return "O"
+
+     @property
+     def name(self) -> str:
+         """
+         A string identifying the data type.
+
+         Will be used for display in, e.g. ``Series.dtype``
+         """
+         raise AbstractMethodError(self)
+
+     @property
+     def names(self) -> list[str] | None:
+         """
+         Ordered list of field names, or None if there are no fields.
+
+         This is for compatibility with NumPy arrays, and may be removed in the
+         future.
+         """
+         return None
+
+     @classmethod
+     def construct_array_type(cls) -> type_t[ExtensionArray]:
+         """
+         Return the array type associated with this dtype.
+
+         Returns
+         -------
+         type
+         """
+         raise AbstractMethodError(cls)
+
+     def empty(self, shape: Shape) -> ExtensionArray:
+         """
+         Construct an ExtensionArray of this dtype with the given shape.
+
+         Analogous to numpy.empty.
+
+         Parameters
+         ----------
+         shape : int or tuple[int]
+
+         Returns
+         -------
+         ExtensionArray
+         """
+         cls = self.construct_array_type()
+         return cls._empty(shape, dtype=self)
+
+     @classmethod
+     def construct_from_string(cls, string: str) -> Self:
+         r"""
+         Construct this type from a string.
+
+         This is useful mainly for data types that accept parameters.
+         For example, a period dtype accepts a frequency parameter that
+         can be set as ``period[h]`` (where ``h`` means hourly frequency).
+
+         By default, in the abstract class, just the name of the type is
+         expected. But subclasses can overwrite this method to accept
+         parameters.
+
+         Parameters
+         ----------
+         string : str
+             The name of the type, for example ``category``.
+
+         Returns
+         -------
+         ExtensionDtype
+             Instance of the dtype.
+
+         Raises
+         ------
+         TypeError
+             If a class cannot be constructed from this 'string'.
+
+         Examples
+         --------
+         For extension dtypes with arguments the following may be an
+         adequate implementation.
+
+         >>> import re
+         >>> @classmethod
+         ... def construct_from_string(cls, string):
+         ...     pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
+         ...     match = pattern.match(string)
+         ...     if match:
+         ...         return cls(**match.groupdict())
+         ...     else:
+         ...         raise TypeError(
+         ...             f"Cannot construct a '{cls.__name__}' from '{string}'"
+         ...         )
+         """
+         if not isinstance(string, str):
+             raise TypeError(
+                 f"'construct_from_string' expects a string, got {type(string)}"
+             )
+         # error: Non-overlapping equality check (left operand type: "str", right
+         # operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
+         assert isinstance(cls.name, str), (cls, type(cls.name))
+         if string != cls.name:
+             raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
+         return cls()
+
+     @classmethod
+     def is_dtype(cls, dtype: object) -> bool:
+         """
+         Check if we match 'dtype'.
+
+         Parameters
+         ----------
+         dtype : object
+             The object to check.
+
+         Returns
+         -------
+         bool
+
+         Notes
+         -----
+         The default implementation is True if
+
+         1. ``cls.construct_from_string(dtype)`` is an instance
+            of ``cls``.
+         2. ``dtype`` is an object and is an instance of ``cls``
+         3. ``dtype`` has a ``dtype`` attribute, and any of the above
+            conditions is true for ``dtype.dtype``.
+         """
+         dtype = getattr(dtype, "dtype", dtype)
+
+         if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)):
+             # https://github.com/pandas-dev/pandas/issues/22960
+             # avoid passing data to `construct_from_string`. This could
+             # cause a FutureWarning from numpy about failing elementwise
+             # comparison from, e.g., comparing DataFrame == 'category'.
+             return False
+         elif dtype is None:
+             return False
+         elif isinstance(dtype, cls):
+             return True
+         if isinstance(dtype, str):
+             try:
+                 return cls.construct_from_string(dtype) is not None
+             except TypeError:
+                 return False
+         return False
+
+     @property
+     def _is_numeric(self) -> bool:
+         """
+         Whether columns with this dtype should be considered numeric.
+
+         By default ExtensionDtypes are assumed to be non-numeric.
+         They'll be excluded from operations that exclude non-numeric
+         columns, like (groupby) reductions, plotting, etc.
+         """
+         return False
+
+     @property
+     def _is_boolean(self) -> bool:
+         """
+         Whether this dtype should be considered boolean.
+
+         By default, ExtensionDtypes are assumed to be non-numeric.
+         Setting this to True will affect the behavior of several places,
+         e.g.
+
+         * is_bool
+         * boolean indexing
+
+         Returns
+         -------
+         bool
+         """
+         return False
+
+     def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
+         """
+         Return the common dtype, if one exists.
+
+         Used in `find_common_type` implementation. This is for example used
+         to determine the resulting dtype in a concat operation.
+
+         If no common dtype exists, return None (which gives the other dtypes
+         the chance to determine a common dtype). If all dtypes in the list
+         return None, then the common dtype will be "object" dtype (this means
+         it is never needed to return "object" dtype from this method itself).
+
+         Parameters
+         ----------
+         dtypes : list of dtypes
+             The dtypes for which to determine a common dtype. This is a list
+             of np.dtype or ExtensionDtype instances.
+
+         Returns
+         -------
+         Common dtype (np.dtype or ExtensionDtype) or None
+         """
+         if len(set(dtypes)) == 1:
+             # only itself
+             return self
+         else:
+             return None
+
+     @property
+     def _can_hold_na(self) -> bool:
+         """
+         Can arrays of this dtype hold NA values?
+         """
+         return True
+
+     @property
+     def _is_immutable(self) -> bool:
+         """
+         Can arrays with this dtype be modified with __setitem__? If not, return
+         True.
+
+         Immutable arrays are expected to raise TypeError on __setitem__ calls.
+         """
+         return False
+
+     @cache_readonly
+     def index_class(self) -> type_t[Index]:
+         """
+         The Index subclass to return from Index.__new__ when this dtype is
+         encountered.
+         """
+         from pandas import Index
+
+         return Index
+
+     @property
+     def _supports_2d(self) -> bool:
+         """
+         Do ExtensionArrays with this dtype support 2D arrays?
+
+         Historically ExtensionArrays were limited to 1D. By returning True here,
+         authors can indicate that their arrays support 2D instances. This can
+         improve performance in some cases, particularly operations with `axis=1`.
+
+         Arrays that support 2D values should:
+
+         - implement Array.reshape
+         - subclass the Dim2CompatTests in tests.extension.base
+         - _concat_same_type should support `axis` keyword
+         - _reduce and reductions should support `axis` keyword
+         """
+         return False
+
+     @property
+     def _can_fast_transpose(self) -> bool:
+         """
+         Is transposing an array with this dtype zero-copy?
+
+         Only relevant for cases where _supports_2d is True.
+         """
+         return False
+
+
+ class StorageExtensionDtype(ExtensionDtype):
+     """ExtensionDtype that may be backed by more than one implementation."""
+
+     name: str
+     _metadata = ("storage",)
+
+     def __init__(self, storage: str | None = None) -> None:
+         self.storage = storage
+
+     def __repr__(self) -> str:
+         return f"{self.name}[{self.storage}]"
+
+     def __str__(self) -> str:
+         return self.name
+
+     def __eq__(self, other: object) -> bool:
+         if isinstance(other, str) and other == self.name:
+             return True
+         return super().__eq__(other)
+
+     def __hash__(self) -> int:
+         # custom __eq__ so have to override __hash__
+         return super().__hash__()
+
+     @property
+     def na_value(self) -> libmissing.NAType:
+         return libmissing.NA
+
+
+ def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
+     """
+     Register an ExtensionType with pandas as class decorator.
+
+     This enables operations like ``.astype(name)`` for the name
+     of the ExtensionDtype.
+
+     Returns
+     -------
+     callable
+         A class decorator.
+
+     Examples
+     --------
+     >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype
+     >>> @register_extension_dtype
+     ... class MyExtensionDtype(ExtensionDtype):
+     ...     name = "myextension"
+     """
+     _registry.register(cls)
+     return cls
+
+
+ class Registry:
+     """
+     Registry for dtype inference.
+
+     The registry allows one to map a string repr of an extension
+     dtype to an extension dtype. The string alias can be used in several
+     places, including
+
+     * Series and Index constructors
+     * :meth:`pandas.array`
+     * :meth:`pandas.Series.astype`
+
+     Multiple extension types can be registered.
+     These are tried in order.
+     """
+
+     def __init__(self) -> None:
+         self.dtypes: list[type_t[ExtensionDtype]] = []
+
+     def register(self, dtype: type_t[ExtensionDtype]) -> None:
+         """
+         Parameters
+         ----------
+         dtype : ExtensionDtype class
+         """
+         if not issubclass(dtype, ExtensionDtype):
+             raise ValueError("can only register pandas extension dtypes")
+
+         self.dtypes.append(dtype)
+
+     @overload
+     def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
+         ...
+
+     @overload
+     def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT:
+         ...
+
+     @overload
+     def find(self, dtype: str) -> ExtensionDtype | None:
+         ...
+
+     @overload
+     def find(
+         self, dtype: npt.DTypeLike
+     ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
+         ...
+
+     def find(
+         self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike
+     ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
+         """
+         Parameters
+         ----------
+         dtype : ExtensionDtype class or instance or str or numpy dtype or python type
+
+         Returns
+         -------
+         return the first matching dtype, otherwise return None
+         """
+         if not isinstance(dtype, str):
+             dtype_type: type_t
+             if not isinstance(dtype, type):
+                 dtype_type = type(dtype)
+             else:
+                 dtype_type = dtype
+             if issubclass(dtype_type, ExtensionDtype):
+                 # cast needed here as mypy doesn't know we have figured
+                 # out it is an ExtensionDtype or type_t[ExtensionDtype]
+                 return cast("ExtensionDtype | type_t[ExtensionDtype]", dtype)
+
+             return None
+
+         for dtype_type in self.dtypes:
+             try:
+                 return dtype_type.construct_from_string(dtype)
+             except TypeError:
+                 pass
+
+         return None
+
+
+ _registry = Registry()
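The `Registry`/`register_extension_dtype` pair is what makes string aliases like "category" resolve to dtype objects. A minimal, hypothetical registration (the `MyDtype` name and its stub `construct_array_type` are invented for illustration):

    import pandas as pd
    from pandas.api.extensions import ExtensionDtype, register_extension_dtype

    @register_extension_dtype
    class MyDtype(ExtensionDtype):
        name = "mydtype"   # construct_from_string matches on this exact string
        type = object

        @classmethod
        def construct_array_type(cls):
            # A real dtype would return its own ExtensionArray subclass;
            # this stub is never exercised by the lookup below.
            from pandas.arrays import NumpyExtensionArray
            return NumpyExtensionArray

    # pandas_dtype consults Registry.find, which tries each registered
    # dtype's construct_from_string until one accepts the string:
    print(pd.api.types.pandas_dtype("mydtype"))   # mydtype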
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/cast.py ADDED
@@ -0,0 +1,1973 @@
+ """
+ Routines for casting.
+ """
+
+ from __future__ import annotations
+
+ import datetime as dt
+ import functools
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Literal,
+     TypeVar,
+     cast,
+     overload,
+ )
+ import warnings
+
+ import numpy as np
+
+ from pandas._config import using_pyarrow_string_dtype
+
+ from pandas._libs import (
+     Interval,
+     Period,
+     lib,
+ )
+ from pandas._libs.missing import (
+     NA,
+     NAType,
+     checknull,
+ )
+ from pandas._libs.tslibs import (
+     NaT,
+     OutOfBoundsDatetime,
+     OutOfBoundsTimedelta,
+     Timedelta,
+     Timestamp,
+     is_supported_dtype,
+ )
+ from pandas._libs.tslibs.timedeltas import array_to_timedelta64
+ from pandas.compat.numpy import np_version_gt2
+ from pandas.errors import (
+     IntCastingNaNError,
+     LossySetitemError,
+ )
+
+ from pandas.core.dtypes.common import (
+     ensure_int8,
+     ensure_int16,
+     ensure_int32,
+     ensure_int64,
+     ensure_object,
+     ensure_str,
+     is_bool,
+     is_complex,
+     is_float,
+     is_integer,
+     is_object_dtype,
+     is_scalar,
+     is_string_dtype,
+     pandas_dtype as pandas_dtype_func,
+ )
+ from pandas.core.dtypes.dtypes import (
+     ArrowDtype,
+     BaseMaskedDtype,
+     CategoricalDtype,
+     DatetimeTZDtype,
+     ExtensionDtype,
+     IntervalDtype,
+     PandasExtensionDtype,
+     PeriodDtype,
+ )
+ from pandas.core.dtypes.generic import (
+     ABCExtensionArray,
+     ABCIndex,
+     ABCSeries,
+ )
+ from pandas.core.dtypes.inference import is_list_like
+ from pandas.core.dtypes.missing import (
+     is_valid_na_for_dtype,
+     isna,
+     na_value_for_dtype,
+     notna,
+ )
+
+ from pandas.io._util import _arrow_dtype_mapping
+
+ if TYPE_CHECKING:
+     from collections.abc import (
+         Sequence,
+         Sized,
+     )
+
+     from pandas._typing import (
+         ArrayLike,
+         Dtype,
+         DtypeObj,
+         NumpyIndexT,
+         Scalar,
+         npt,
+     )
+
+     from pandas import Index
+     from pandas.core.arrays import (
+         Categorical,
+         DatetimeArray,
+         ExtensionArray,
+         IntervalArray,
+         PeriodArray,
+         TimedeltaArray,
+     )
+
+
+ _int8_max = np.iinfo(np.int8).max
+ _int16_max = np.iinfo(np.int16).max
+ _int32_max = np.iinfo(np.int32).max
+
+ _dtype_obj = np.dtype(object)
+
+ NumpyArrayT = TypeVar("NumpyArrayT", bound=np.ndarray)
+
+
+ def maybe_convert_platform(
+     values: list | tuple | range | np.ndarray | ExtensionArray,
+ ) -> ArrayLike:
+     """try to do platform conversion, allow ndarray or list here"""
+     arr: ArrayLike
+
+     if isinstance(values, (list, tuple, range)):
+         arr = construct_1d_object_array_from_listlike(values)
+     else:
+         # The caller is responsible for ensuring that we have np.ndarray
+         # or ExtensionArray here.
+         arr = values
+
+     if arr.dtype == _dtype_obj:
+         arr = cast(np.ndarray, arr)
+         arr = lib.maybe_convert_objects(arr)
+
+     return arr
+
+
+ def is_nested_object(obj) -> bool:
+     """
+     return a boolean if we have a nested object, e.g. a Series with 1 or
+     more Series elements
+
+     This may not necessarily be performant.
+
+     """
+     return bool(
+         isinstance(obj, ABCSeries)
+         and is_object_dtype(obj.dtype)
+         and any(isinstance(v, ABCSeries) for v in obj._values)
+     )
+
+
+ def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar:
+     """
+     Cast scalar to Timestamp or Timedelta if scalar is datetime-like
+     and dtype is not object.
+
+     Parameters
+     ----------
+     value : scalar
+     dtype : Dtype, optional
+
+     Returns
+     -------
+     scalar
+     """
+     if dtype == _dtype_obj:
+         pass
+     elif isinstance(value, (np.datetime64, dt.datetime)):
+         value = Timestamp(value)
+     elif isinstance(value, (np.timedelta64, dt.timedelta)):
+         value = Timedelta(value)
+
+     return value
+
+
+ def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType:
+     """
+     If passed a scalar cast the scalar to a python native type.
+
+     Parameters
+     ----------
+     value : scalar or Series
+
+     Returns
+     -------
+     scalar or Series
+     """
+     if is_float(value):
+         value = float(value)
+     elif is_integer(value):
+         value = int(value)
+     elif is_bool(value):
+         value = bool(value)
+     elif isinstance(value, (np.datetime64, np.timedelta64)):
+         value = maybe_box_datetimelike(value)
+     elif value is NA:
+         value = None
+     return value
+
+
+ def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
+     """
+     Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting
+     into a numpy array. Failing to unbox would risk dropping nanoseconds.
+
+     Notes
+     -----
+     Caller is responsible for checking dtype.kind in "mM"
+     """
+     if is_valid_na_for_dtype(value, dtype):
+         # GH#36541: can't fill array directly with pd.NaT
+         # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT)
+         # ValueError: cannot convert float NaN to integer
+         value = dtype.type("NaT", "ns")
+     elif isinstance(value, Timestamp):
+         if value.tz is None:
+             value = value.to_datetime64()
+         elif not isinstance(dtype, DatetimeTZDtype):
+             raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype")
+     elif isinstance(value, Timedelta):
+         value = value.to_timedelta64()
+
+     _disallow_mismatched_datetimelike(value, dtype)
+     return value
+
+
+ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
+     """
+     numpy allows np.array(dt64values, dtype="timedelta64[ns]") and
+     vice-versa, but we do not want to allow this, so we need to
+     check explicitly
+     """
+     vdtype = getattr(value, "dtype", None)
+     if vdtype is None:
+         return
+     elif (vdtype.kind == "m" and dtype.kind == "M") or (
+         vdtype.kind == "M" and dtype.kind == "m"
+     ):
+         raise TypeError(f"Cannot cast {repr(value)} to {dtype}")
+
+
+ @overload
+ def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
+     ...
+
+
+ @overload
+ def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
+     ...
+
+
+ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
+     """
+     try to cast to the specified dtype (e.g. convert back to bool/int
+     or could be an astype of float64->float32
+     """
+     if isinstance(result, ABCSeries):
+         result = result._values
+     do_round = False
+
+     if isinstance(dtype, str):
+         if dtype == "infer":
+             inferred_type = lib.infer_dtype(result, skipna=False)
+             if inferred_type == "boolean":
+                 dtype = "bool"
+             elif inferred_type == "integer":
+                 dtype = "int64"
+             elif inferred_type == "datetime64":
+                 dtype = "datetime64[ns]"
+             elif inferred_type in ["timedelta", "timedelta64"]:
+                 dtype = "timedelta64[ns]"
+
+             # try to upcast here
+             elif inferred_type == "floating":
+                 dtype = "int64"
+                 if issubclass(result.dtype.type, np.number):
+                     do_round = True
+
+             else:
+                 # TODO: complex? what if result is already non-object?
+                 dtype = "object"
+
+         dtype = np.dtype(dtype)
+
+     if not isinstance(dtype, np.dtype):
+         # enforce our signature annotation
+         raise TypeError(dtype)  # pragma: no cover
+
+     converted = maybe_downcast_numeric(result, dtype, do_round)
+     if converted is not result:
+         return converted
+
+     # a datetimelike
+     # GH12821, iNaT is cast to float
+     if dtype.kind in "mM" and result.dtype.kind in "if":
+         result = result.astype(dtype)
+
+     elif dtype.kind == "m" and result.dtype == _dtype_obj:
+         # test_where_downcast_to_td64
+         result = cast(np.ndarray, result)
+         result = array_to_timedelta64(result)
+
+     elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj:
+         result = cast(np.ndarray, result)
+         return np.asarray(maybe_cast_to_datetime(result, dtype=dtype))
+
+     return result
+
+
+ @overload
+ def maybe_downcast_numeric(
+     result: np.ndarray, dtype: np.dtype, do_round: bool = False
+ ) -> np.ndarray:
+     ...
+
+
+ @overload
+ def maybe_downcast_numeric(
+     result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
+ ) -> ArrayLike:
+     ...
+
+
+ def maybe_downcast_numeric(
+     result: ArrayLike, dtype: DtypeObj, do_round: bool = False
+ ) -> ArrayLike:
+     """
+     Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
+
+     Parameters
+     ----------
+     result : ndarray or ExtensionArray
+     dtype : np.dtype or ExtensionDtype
+     do_round : bool
+
+     Returns
+     -------
+     ndarray or ExtensionArray
+     """
+     if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype):
+         # e.g. SparseDtype has no itemsize attr
+         return result
+
+     def trans(x):
+         if do_round:
+             return x.round()
+         return x
+
+     if dtype.kind == result.dtype.kind:
+         # don't allow upcasts here (except if empty)
+         if result.dtype.itemsize <= dtype.itemsize and result.size:
+             return result
+
+     if dtype.kind in "biu":
+         if not result.size:
+             # if we don't have any elements, just astype it
+             return trans(result).astype(dtype)
+
+         if isinstance(result, np.ndarray):
+             element = result.item(0)
+         else:
+             element = result.iloc[0]
+         if not isinstance(element, (np.integer, np.floating, int, float, bool)):
+             # a comparable, e.g. a Decimal may slip in here
+             return result
+
+         if (
+             issubclass(result.dtype.type, (np.object_, np.number))
+             and notna(result).all()
+         ):
+             new_result = trans(result).astype(dtype)
+             if new_result.dtype.kind == "O" or result.dtype.kind == "O":
+                 # np.allclose may raise TypeError on object-dtype
+                 if (new_result == result).all():
+                     return new_result
+             else:
+                 if np.allclose(new_result, result, rtol=0):
+                     return new_result
+
+     elif (
+         issubclass(dtype.type, np.floating)
+         and result.dtype.kind != "b"
+         and not is_string_dtype(result.dtype)
+     ):
+         with warnings.catch_warnings():
+             warnings.filterwarnings(
+                 "ignore", "overflow encountered in cast", RuntimeWarning
+             )
+             new_result = result.astype(dtype)
+
+         # Adjust tolerances based on floating point size
+         size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16}
+
+         atol = size_tols.get(new_result.dtype.itemsize, 0.0)
+
+         # Check downcast float values are still equal within 7 digits when
+         # converting from float64 to float32
+         if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol):
+             return new_result
+
+     elif dtype.kind == result.dtype.kind == "c":
+         new_result = result.astype(dtype)
+
+         if np.array_equal(new_result, result, equal_nan=True):
+             # TODO: use tolerance like we do for float?
+             return new_result
+
+     return result
+
+
+ def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT:
+     """
+     If array is an int/uint/float with bit size lower than 64 bits, upcast it to 64 bit.
+
+     Parameters
+     ----------
150
+
151
+ """
152
+ return bool(
153
+ isinstance(obj, ABCSeries)
154
+ and is_object_dtype(obj.dtype)
155
+ and any(isinstance(v, ABCSeries) for v in obj._values)
156
+ )
157
+
158
+
159
+ def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar:
160
+ """
161
+ Cast scalar to Timestamp or Timedelta if scalar is datetime-like
162
+ and dtype is not object.
163
+
164
+ Parameters
165
+ ----------
166
+ value : scalar
167
+ dtype : Dtype, optional
168
+
169
+ Returns
170
+ -------
171
+ scalar
172
+ """
173
+ if dtype == _dtype_obj:
174
+ pass
175
+ elif isinstance(value, (np.datetime64, dt.datetime)):
176
+ value = Timestamp(value)
177
+ elif isinstance(value, (np.timedelta64, dt.timedelta)):
178
+ value = Timedelta(value)
179
+
180
+ return value
181
+
182
+
183
+ def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType:
184
+ """
185
+ If passed a scalar cast the scalar to a python native type.
186
+
187
+ Parameters
188
+ ----------
189
+ value : scalar or Series
190
+
191
+ Returns
192
+ -------
193
+ scalar or Series
194
+ """
195
+ if is_float(value):
196
+ value = float(value)
197
+ elif is_integer(value):
198
+ value = int(value)
199
+ elif is_bool(value):
200
+ value = bool(value)
201
+ elif isinstance(value, (np.datetime64, np.timedelta64)):
202
+ value = maybe_box_datetimelike(value)
203
+ elif value is NA:
204
+ value = None
205
+ return value
206
+
207
+
208
+ def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
209
+ """
210
+ Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting
211
+ into a numpy array. Failing to unbox would risk dropping nanoseconds.
212
+
213
+ Notes
214
+ -----
215
+ Caller is responsible for checking dtype.kind in "mM"
216
+ """
217
+ if is_valid_na_for_dtype(value, dtype):
218
+ # GH#36541: can't fill array directly with pd.NaT
219
+ # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT)
220
+ # ValueError: cannot convert float NaN to integer
221
+ value = dtype.type("NaT", "ns")
222
+ elif isinstance(value, Timestamp):
223
+ if value.tz is None:
224
+ value = value.to_datetime64()
225
+ elif not isinstance(dtype, DatetimeTZDtype):
226
+ raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype")
227
+ elif isinstance(value, Timedelta):
228
+ value = value.to_timedelta64()
229
+
230
+ _disallow_mismatched_datetimelike(value, dtype)
231
+ return value
232
+
233
+
234
+ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
235
+ """
236
+ numpy allows np.array(dt64values, dtype="timedelta64[ns]") and
237
+ vice-versa, but we do not want to allow this, so we need to
238
+ check explicitly
239
+ """
240
+ vdtype = getattr(value, "dtype", None)
241
+ if vdtype is None:
242
+ return
243
+ elif (vdtype.kind == "m" and dtype.kind == "M") or (
244
+ vdtype.kind == "M" and dtype.kind == "m"
245
+ ):
246
+ raise TypeError(f"Cannot cast {repr(value)} to {dtype}")
247
+
248
+
249
+ @overload
250
+ def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
251
+ ...
252
+
253
+
254
+ @overload
255
+ def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
256
+ ...
257
+
258
+
259
+ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
260
+ """
261
+ try to cast to the specified dtype (e.g. convert back to bool/int
262
+ or could be an astype of float64->float32
263
+ """
264
+ if isinstance(result, ABCSeries):
265
+ result = result._values
266
+ do_round = False
267
+
268
+ if isinstance(dtype, str):
269
+ if dtype == "infer":
270
+ inferred_type = lib.infer_dtype(result, skipna=False)
271
+ if inferred_type == "boolean":
272
+ dtype = "bool"
273
+ elif inferred_type == "integer":
274
+ dtype = "int64"
275
+ elif inferred_type == "datetime64":
276
+ dtype = "datetime64[ns]"
277
+ elif inferred_type in ["timedelta", "timedelta64"]:
278
+ dtype = "timedelta64[ns]"
279
+
280
+ # try to upcast here
281
+ elif inferred_type == "floating":
282
+ dtype = "int64"
283
+ if issubclass(result.dtype.type, np.number):
284
+ do_round = True
285
+
286
+ else:
287
+ # TODO: complex? what if result is already non-object?
288
+ dtype = "object"
289
+
290
+ dtype = np.dtype(dtype)
291
+
292
+ if not isinstance(dtype, np.dtype):
293
+ # enforce our signature annotation
294
+ raise TypeError(dtype) # pragma: no cover
295
+
296
+ converted = maybe_downcast_numeric(result, dtype, do_round)
297
+ if converted is not result:
298
+ return converted
299
+
300
+ # a datetimelike
301
+ # GH12821, iNaT is cast to float
302
+ if dtype.kind in "mM" and result.dtype.kind in "if":
303
+ result = result.astype(dtype)
304
+
305
+ elif dtype.kind == "m" and result.dtype == _dtype_obj:
306
+ # test_where_downcast_to_td64
307
+ result = cast(np.ndarray, result)
308
+ result = array_to_timedelta64(result)
309
+
310
+ elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj:
311
+ result = cast(np.ndarray, result)
312
+ return np.asarray(maybe_cast_to_datetime(result, dtype=dtype))
313
+
314
+ return result
315
+
316
+
317
+ @overload
318
+ def maybe_downcast_numeric(
319
+ result: np.ndarray, dtype: np.dtype, do_round: bool = False
320
+ ) -> np.ndarray:
321
+ ...
322
+
323
+
324
+ @overload
325
+ def maybe_downcast_numeric(
326
+ result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
327
+ ) -> ArrayLike:
328
+ ...
329
+
330
+
331
+ def maybe_downcast_numeric(
332
+ result: ArrayLike, dtype: DtypeObj, do_round: bool = False
333
+ ) -> ArrayLike:
334
+ """
335
+ Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
336
+
337
+ Parameters
338
+ ----------
339
+ result : ndarray or ExtensionArray
340
+ dtype : np.dtype or ExtensionDtype
341
+ do_round : bool
342
+
343
+ Returns
344
+ -------
345
+ ndarray or ExtensionArray
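+
+ Examples
+ --------
+ Illustrative sketch of a lossless integer downcast (numpy-only inputs):
+
+ >>> maybe_downcast_numeric(np.array([1.0, 2.0]), np.dtype("int64"))
+ array([1, 2])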
346
+ """
347
+ if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype):
348
+ # e.g. SparseDtype has no itemsize attr
349
+ return result
350
+
351
+ def trans(x):
352
+ if do_round:
353
+ return x.round()
354
+ return x
355
+
356
+ if dtype.kind == result.dtype.kind:
357
+ # don't allow upcasts here (except if empty)
358
+ if result.dtype.itemsize <= dtype.itemsize and result.size:
359
+ return result
360
+
361
+ if dtype.kind in "biu":
362
+ if not result.size:
363
+ # if we don't have any elements, just astype it
364
+ return trans(result).astype(dtype)
365
+
366
+ if isinstance(result, np.ndarray):
367
+ element = result.item(0)
368
+ else:
369
+ element = result.iloc[0]
370
+ if not isinstance(element, (np.integer, np.floating, int, float, bool)):
371
+ # a comparable, e.g. a Decimal may slip in here
372
+ return result
373
+
374
+ if (
375
+ issubclass(result.dtype.type, (np.object_, np.number))
376
+ and notna(result).all()
377
+ ):
378
+ new_result = trans(result).astype(dtype)
379
+ if new_result.dtype.kind == "O" or result.dtype.kind == "O":
380
+ # np.allclose may raise TypeError on object-dtype
381
+ if (new_result == result).all():
382
+ return new_result
383
+ else:
384
+ if np.allclose(new_result, result, rtol=0):
385
+ return new_result
386
+
387
+ elif (
388
+ issubclass(dtype.type, np.floating)
389
+ and result.dtype.kind != "b"
390
+ and not is_string_dtype(result.dtype)
391
+ ):
392
+ with warnings.catch_warnings():
393
+ warnings.filterwarnings(
394
+ "ignore", "overflow encountered in cast", RuntimeWarning
395
+ )
396
+ new_result = result.astype(dtype)
397
+
398
+ # Adjust tolerances based on floating point size
399
+ size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16}
400
+
401
+ atol = size_tols.get(new_result.dtype.itemsize, 0.0)
402
+
403
+ # Check downcast float values are still equal within 7 digits when
404
+ # converting from float64 to float32
405
+ if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol):
406
+ return new_result
407
+
408
+ elif dtype.kind == result.dtype.kind == "c":
409
+ new_result = result.astype(dtype)
410
+
411
+ if np.array_equal(new_result, result, equal_nan=True):
412
+ # TODO: use tolerance like we do for float?
413
+ return new_result
414
+
415
+ return result
416
+
417
+
418
+ def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT:
419
+ """
420
+ If array is an int/uint/float with bit size lower than 64 bits, upcast it to 64 bits.
421
+
422
+ Parameters
423
+ ----------
424
+ arr : ndarray or ExtensionArray
425
+
426
+ Returns
427
+ -------
428
+ ndarray or ExtensionArray
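+
+ Examples
+ --------
+ Illustrative (plain numpy input):
+
+ >>> maybe_upcast_numeric_to_64bit(np.array([1, 2], dtype=np.int32)).dtype
+ dtype('int64')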
429
+ """
430
+ dtype = arr.dtype
431
+ if dtype.kind == "i" and dtype != np.int64:
432
+ return arr.astype(np.int64)
433
+ elif dtype.kind == "u" and dtype != np.uint64:
434
+ return arr.astype(np.uint64)
435
+ elif dtype.kind == "f" and dtype != np.float64:
436
+ return arr.astype(np.float64)
437
+ else:
438
+ return arr
439
+
440
+
441
+ def maybe_cast_pointwise_result(
442
+ result: ArrayLike,
443
+ dtype: DtypeObj,
444
+ numeric_only: bool = False,
445
+ same_dtype: bool = True,
446
+ ) -> ArrayLike:
447
+ """
448
+ Try casting result of a pointwise operation back to the original dtype if
449
+ appropriate.
450
+
451
+ Parameters
452
+ ----------
453
+ result : array-like
454
+ Result to cast.
455
+ dtype : np.dtype or ExtensionDtype
456
+ Dtype of the input Series from which result was calculated.
457
+ numeric_only : bool, default False
458
+ Whether to cast only numerics or datetimes as well.
459
+ same_dtype : bool, default True
460
+ Specify dtype when calling _from_sequence
461
+
462
+ Returns
463
+ -------
464
+ result : array-like
465
+ result maybe casted to the dtype.
466
+ """
467
+
468
+ if isinstance(dtype, ExtensionDtype):
469
+ cls = dtype.construct_array_type()
470
+ if same_dtype:
471
+ result = _maybe_cast_to_extension_array(cls, result, dtype=dtype)
472
+ else:
473
+ result = _maybe_cast_to_extension_array(cls, result)
474
+
475
+ elif (numeric_only and dtype.kind in "iufcb") or not numeric_only:
476
+ result = maybe_downcast_to_dtype(result, dtype)
477
+
478
+ return result
479
+
480
+
481
+ def _maybe_cast_to_extension_array(
482
+ cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None
483
+ ) -> ArrayLike:
484
+ """
485
+ Call to `_from_sequence` that returns the object unchanged on Exception.
486
+
487
+ Parameters
488
+ ----------
489
+ cls : class, subclass of ExtensionArray
490
+ obj : arraylike
491
+ Values to pass to cls._from_sequence
492
+ dtype : ExtensionDtype, optional
493
+
494
+ Returns
495
+ -------
496
+ ExtensionArray or obj
497
+ """
498
+ result: ArrayLike
499
+
500
+ if dtype is not None:
501
+ try:
502
+ result = cls._from_scalars(obj, dtype=dtype)
503
+ except (TypeError, ValueError):
504
+ return obj
505
+ return result
506
+
507
+ try:
508
+ result = cls._from_sequence(obj, dtype=dtype)
509
+ except Exception:
510
+ # We can't predict what downstream EA constructors may raise
511
+ result = obj
512
+ return result
513
+
514
+
515
+ @overload
516
+ def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype:
517
+ ...
518
+
519
+
520
+ @overload
521
+ def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype:
522
+ ...
523
+
524
+
525
+ def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:
526
+ """
527
+ If we have a dtype that cannot hold NA values, find the best match that can.
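+
+ Illustrative sketch of the numpy-dtype cases handled below:
+
+ >>> ensure_dtype_can_hold_na(np.dtype("int64"))
+ dtype('float64')
+ >>> ensure_dtype_can_hold_na(np.dtype("bool"))
+ dtype('O')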
528
+ """
529
+ if isinstance(dtype, ExtensionDtype):
530
+ if dtype._can_hold_na:
531
+ return dtype
532
+ elif isinstance(dtype, IntervalDtype):
533
+ # TODO(GH#45349): don't special-case IntervalDtype, allow
534
+ # overriding instead of returning object below.
535
+ return IntervalDtype(np.float64, closed=dtype.closed)
536
+ return _dtype_obj
537
+ elif dtype.kind == "b":
538
+ return _dtype_obj
539
+ elif dtype.kind in "iu":
540
+ return np.dtype(np.float64)
541
+ return dtype
542
+
543
+
544
+ _canonical_nans = {
545
+ np.datetime64: np.datetime64("NaT", "ns"),
546
+ np.timedelta64: np.timedelta64("NaT", "ns"),
547
+ type(np.nan): np.nan,
548
+ }
549
+
550
+
551
+ def maybe_promote(dtype: np.dtype, fill_value=np.nan):
552
+ """
553
+ Find the minimal dtype that can hold both the given dtype and fill_value.
554
+
555
+ Parameters
556
+ ----------
557
+ dtype : np.dtype
558
+ fill_value : scalar, default np.nan
559
+
560
+ Returns
561
+ -------
562
+ dtype
563
+ Upcasted from dtype argument if necessary.
564
+ fill_value
565
+ Upcasted from fill_value argument if necessary.
566
+
567
+ Raises
568
+ ------
569
+ ValueError
570
+ If fill_value is a non-scalar and dtype is not object.
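+
+ Examples
+ --------
+ Illustrative: NaN cannot be held by int64, so both dtype and
+ fill_value are promoted:
+
+ >>> maybe_promote(np.dtype(np.int64), fill_value=np.nan)
+ (dtype('float64'), nan)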
571
+ """
572
+ orig = fill_value
573
+ orig_is_nat = False
574
+ if checknull(fill_value):
575
+ # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740
576
+ # avoid cache misses with NaN/NaT values that are not singletons
577
+ if fill_value is not NA:
578
+ try:
579
+ orig_is_nat = np.isnat(fill_value)
580
+ except TypeError:
581
+ pass
582
+
583
+ fill_value = _canonical_nans.get(type(fill_value), fill_value)
584
+
585
+ # for performance, we are using a cached version of the actual implementation
586
+ # of the function in _maybe_promote. However, this doesn't always work (in case
587
+ # of non-hashable arguments), so we fallback to the actual implementation if needed
588
+ try:
589
+ # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type
590
+ # "Type[Any]"; expected "Hashable" [arg-type]
591
+ dtype, fill_value = _maybe_promote_cached(
592
+ dtype, fill_value, type(fill_value) # type: ignore[arg-type]
593
+ )
594
+ except TypeError:
595
+ # if fill_value is not hashable (required for caching)
596
+ dtype, fill_value = _maybe_promote(dtype, fill_value)
597
+
598
+ if (dtype == _dtype_obj and orig is not None) or (
599
+ orig_is_nat and np.datetime_data(orig)[0] != "ns"
600
+ ):
601
+ # GH#51592,53497 restore our potentially non-canonical fill_value
602
+ fill_value = orig
603
+ return dtype, fill_value
604
+
605
+
606
+ @functools.lru_cache
607
+ def _maybe_promote_cached(dtype, fill_value, fill_value_type):
608
+ # The cached version of _maybe_promote below
609
+ # This also uses fill_value_type as an (unused) argument to use this in the
610
+ # cache lookup -> to differentiate 1 and True
611
+ return _maybe_promote(dtype, fill_value)
612
+
613
+
614
+ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
615
+ # The actual implementation of the function, use `maybe_promote` above for
616
+ # a cached version.
617
+ if not is_scalar(fill_value):
618
620
+ if dtype != object:
621
+ # with object dtype there is nothing to promote, and the user can
622
+ # pass pretty much any weird fill_value they like
623
+ raise ValueError("fill_value must be a scalar")
624
+ dtype = _dtype_obj
625
+ return dtype, fill_value
626
+
627
+ if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM":
628
+ dtype = ensure_dtype_can_hold_na(dtype)
629
+ fv = na_value_for_dtype(dtype)
630
+ return dtype, fv
631
+
632
+ elif isinstance(dtype, CategoricalDtype):
633
+ if fill_value in dtype.categories or isna(fill_value):
634
+ return dtype, fill_value
635
+ else:
636
+ return object, ensure_object(fill_value)
637
+
638
+ elif isna(fill_value):
639
+ dtype = _dtype_obj
640
+ if fill_value is None:
641
+ # but we retain e.g. pd.NA
642
+ fill_value = np.nan
643
+ return dtype, fill_value
644
+
645
+ # returns tuple of (dtype, fill_value)
646
+ if issubclass(dtype.type, np.datetime64):
647
+ inferred, fv = infer_dtype_from_scalar(fill_value)
648
+ if inferred == dtype:
649
+ return dtype, fv
650
+
651
+ from pandas.core.arrays import DatetimeArray
652
+
653
+ dta = DatetimeArray._from_sequence([], dtype="M8[ns]")
654
+ try:
655
+ fv = dta._validate_setitem_value(fill_value)
656
+ return dta.dtype, fv
657
+ except (ValueError, TypeError):
658
+ return _dtype_obj, fill_value
659
+
660
+ elif issubclass(dtype.type, np.timedelta64):
661
+ inferred, fv = infer_dtype_from_scalar(fill_value)
662
+ if inferred == dtype:
663
+ return dtype, fv
664
+
665
+ elif inferred.kind == "m":
666
+ # different unit, e.g. passed np.timedelta64(24, "h") with dtype=m8[ns]
667
+ # see if we can losslessly cast it to our dtype
668
+ unit = np.datetime_data(dtype)[0]
669
+ try:
670
+ td = Timedelta(fill_value).as_unit(unit, round_ok=False)
671
+ except OutOfBoundsTimedelta:
672
+ return _dtype_obj, fill_value
673
+ else:
674
+ return dtype, td.asm8
675
+
676
+ return _dtype_obj, fill_value
677
+
678
+ elif is_float(fill_value):
679
+ if issubclass(dtype.type, np.bool_):
680
+ dtype = np.dtype(np.object_)
681
+
682
+ elif issubclass(dtype.type, np.integer):
683
+ dtype = np.dtype(np.float64)
684
+
685
+ elif dtype.kind == "f":
686
+ mst = np.min_scalar_type(fill_value)
687
+ if mst > dtype:
688
+ # e.g. mst is np.float64 and dtype is np.float32
689
+ dtype = mst
690
+
691
+ elif dtype.kind == "c":
692
+ mst = np.min_scalar_type(fill_value)
693
+ dtype = np.promote_types(dtype, mst)
694
+
695
+ elif is_bool(fill_value):
696
+ if not issubclass(dtype.type, np.bool_):
697
+ dtype = np.dtype(np.object_)
698
+
699
+ elif is_integer(fill_value):
700
+ if issubclass(dtype.type, np.bool_):
701
+ dtype = np.dtype(np.object_)
702
+
703
+ elif issubclass(dtype.type, np.integer):
704
+ if not np_can_cast_scalar(fill_value, dtype): # type: ignore[arg-type]
705
+ # upcast to prevent overflow
706
+ mst = np.min_scalar_type(fill_value)
707
+ dtype = np.promote_types(dtype, mst)
708
+ if dtype.kind == "f":
709
+ # Case where we disagree with numpy
710
+ dtype = np.dtype(np.object_)
711
+
712
+ elif is_complex(fill_value):
713
+ if issubclass(dtype.type, np.bool_):
714
+ dtype = np.dtype(np.object_)
715
+
716
+ elif issubclass(dtype.type, (np.integer, np.floating)):
717
+ mst = np.min_scalar_type(fill_value)
718
+ dtype = np.promote_types(dtype, mst)
719
+
720
+ elif dtype.kind == "c":
721
+ mst = np.min_scalar_type(fill_value)
722
+ if mst > dtype:
723
+ # e.g. mst is np.complex128 and dtype is np.complex64
724
+ dtype = mst
725
+
726
+ else:
727
+ dtype = np.dtype(np.object_)
728
+
729
+ # in case we have a string that looked like a number
730
+ if issubclass(dtype.type, (bytes, str)):
731
+ dtype = np.dtype(np.object_)
732
+
733
+ fill_value = _ensure_dtype_type(fill_value, dtype)
734
+ return dtype, fill_value
735
+
736
+
737
+ def _ensure_dtype_type(value, dtype: np.dtype):
738
+ """
739
+ Ensure that the given value is an instance of the given dtype.
740
+
741
+ e.g. if out dtype is np.complex64_, we should have an instance of that
742
+ as opposed to a python complex object.
743
+
744
+ Parameters
745
+ ----------
746
+ value : object
747
+ dtype : np.dtype
748
+
749
+ Returns
750
+ -------
751
+ object
752
+ """
753
+ # Start with exceptions in which we do _not_ cast to numpy types
754
+
755
+ if dtype == _dtype_obj:
756
+ return value
757
+
758
+ # Note: before we get here we have already excluded isna(value)
759
+ return dtype.type(value)
760
+
761
+
762
+ def infer_dtype_from(val) -> tuple[DtypeObj, Any]:
763
+ """
764
+ Interpret the dtype from a scalar or array.
765
+
766
+ Parameters
767
+ ----------
768
+ val : object
769
+ """
770
+ if not is_list_like(val):
771
+ return infer_dtype_from_scalar(val)
772
+ return infer_dtype_from_array(val)
773
+
774
+
775
+ def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]:
776
+ """
777
+ Interpret the dtype from a scalar.
778
+
779
+ Parameters
780
+ ----------
781
+ val : object
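+
+ Examples
+ --------
+ Illustrative of the basic numeric paths:
+
+ >>> infer_dtype_from_scalar(1)
+ (dtype('int64'), 1)
+ >>> infer_dtype_from_scalar(1.5)
+ (dtype('float64'), 1.5)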
782
+ """
783
+ dtype: DtypeObj = _dtype_obj
784
+
785
+ # a 1-element ndarray
786
+ if isinstance(val, np.ndarray):
787
+ if val.ndim != 0:
788
+ msg = "invalid ndarray passed to infer_dtype_from_scalar"
789
+ raise ValueError(msg)
790
+
791
+ dtype = val.dtype
792
+ val = lib.item_from_zerodim(val)
793
+
794
+ elif isinstance(val, str):
795
+ # If we create an empty array using a string to infer
796
+ # the dtype, NumPy will only allocate one character per entry
797
+ # so this is kind of bad. Alternatively we could use np.repeat
798
+ # instead of np.empty (but then you still don't want things
799
+ # coming out as np.str_!)
800
+
801
+ dtype = _dtype_obj
802
+ if using_pyarrow_string_dtype():
803
+ from pandas.core.arrays.string_ import StringDtype
804
+
805
+ dtype = StringDtype(storage="pyarrow_numpy")
806
+
807
+ elif isinstance(val, (np.datetime64, dt.datetime)):
808
+ try:
809
+ val = Timestamp(val)
810
+ except OutOfBoundsDatetime:
811
+ return _dtype_obj, val
812
+
813
+ if val is NaT or val.tz is None:
814
+ val = val.to_datetime64()
815
+ dtype = val.dtype
816
+ # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes
817
+ else:
818
+ dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz)
819
+
820
+ elif isinstance(val, (np.timedelta64, dt.timedelta)):
821
+ try:
822
+ val = Timedelta(val)
823
+ except (OutOfBoundsTimedelta, OverflowError):
824
+ dtype = _dtype_obj
825
+ else:
826
+ if val is NaT:
827
+ val = np.timedelta64("NaT", "ns")
828
+ else:
829
+ val = val.asm8
830
+ dtype = val.dtype
831
+
832
+ elif is_bool(val):
833
+ dtype = np.dtype(np.bool_)
834
+
835
+ elif is_integer(val):
836
+ if isinstance(val, np.integer):
837
+ dtype = np.dtype(type(val))
838
+ else:
839
+ dtype = np.dtype(np.int64)
840
+
841
+ try:
842
+ np.array(val, dtype=dtype)
843
+ except OverflowError:
844
+ dtype = np.array(val).dtype
845
+
846
+ elif is_float(val):
847
+ if isinstance(val, np.floating):
848
+ dtype = np.dtype(type(val))
849
+ else:
850
+ dtype = np.dtype(np.float64)
851
+
852
+ elif is_complex(val):
853
+ dtype = np.dtype(np.complex128)
854
+
855
+ if isinstance(val, Period):
856
+ dtype = PeriodDtype(freq=val.freq)
857
+ elif isinstance(val, Interval):
858
+ subtype = infer_dtype_from_scalar(val.left)[0]
859
+ dtype = IntervalDtype(subtype=subtype, closed=val.closed)
860
+
861
+ return dtype, val
862
+
863
+
864
+ def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]:
865
+ """
866
+ Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
867
+
868
+ Parameters
869
+ ----------
870
+ d: dict-like object
871
+
872
+ Returns
873
+ -------
874
+ dict
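+
+ Examples
+ --------
+ Sketch: a datetime64 key is boxed to a Timestamp:
+
+ >>> dict_compat({np.datetime64("2021-01-01"): 1})
+ {Timestamp('2021-01-01 00:00:00'): 1}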
875
+ """
876
+ return {maybe_box_datetimelike(key): value for key, value in d.items()}
877
+
878
+
879
+ def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]:
880
+ """
881
+ Infer the dtype from an array.
882
+
883
+ Parameters
884
+ ----------
885
+ arr : array
886
+
887
+ Returns
888
+ -------
889
+ tuple (pandas-compat dtype, array)
890
+
891
+
892
+ Examples
893
+ --------
894
+ >>> np.asarray([1, '1'])
895
+ array(['1', '1'], dtype='<U21')
896
+
897
+ >>> infer_dtype_from_array([1, '1'])
898
+ (dtype('O'), [1, '1'])
899
+ """
900
+ if isinstance(arr, np.ndarray):
901
+ return arr.dtype, arr
902
+
903
+ if not is_list_like(arr):
904
+ raise TypeError("'arr' must be list-like")
905
+
906
+ arr_dtype = getattr(arr, "dtype", None)
907
+ if isinstance(arr_dtype, ExtensionDtype):
908
+ return arr.dtype, arr
909
+
910
+ elif isinstance(arr, ABCSeries):
911
+ return arr.dtype, np.asarray(arr)
912
+
913
+ # don't force numpy coerce with nan's
914
+ inferred = lib.infer_dtype(arr, skipna=False)
915
+ if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
916
+ return (np.dtype(np.object_), arr)
917
+
918
+ arr = np.asarray(arr)
919
+ return arr.dtype, arr
920
+
921
+
922
+ def _maybe_infer_dtype_type(element):
923
+ """
924
+ Try to infer an object's dtype, for use in arithmetic ops.
925
+
926
+ Uses `element.dtype` if that's available.
927
+ Objects implementing the iterator protocol are cast to a NumPy array,
928
+ and from there the array's type is used.
929
+
930
+ Parameters
931
+ ----------
932
+ element : object
933
+ Possibly has a `.dtype` attribute, and possibly the iterator
934
+ protocol.
935
+
936
+ Returns
937
+ -------
938
+ tipo : type
939
+
940
+ Examples
941
+ --------
942
+ >>> from collections import namedtuple
943
+ >>> Foo = namedtuple("Foo", "dtype")
944
+ >>> _maybe_infer_dtype_type(Foo(np.dtype("i8")))
945
+ dtype('int64')
946
+ """
947
+ tipo = None
948
+ if hasattr(element, "dtype"):
949
+ tipo = element.dtype
950
+ elif is_list_like(element):
951
+ element = np.asarray(element)
952
+ tipo = element.dtype
953
+ return tipo
954
+
955
+
956
+ def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None:
957
+ """
958
+ Change string like dtypes to object for
959
+ ``DataFrame.select_dtypes()``.
960
+ """
961
+ # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected
962
+ # "Union[dtype[Any], ExtensionDtype, None]"
963
+ # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected
964
+ # "Union[dtype[Any], ExtensionDtype, None]"
965
+ non_string_dtypes = dtype_set - {
966
+ np.dtype("S").type, # type: ignore[arg-type]
967
+ np.dtype("<U").type, # type: ignore[arg-type]
968
+ }
969
+ if non_string_dtypes != dtype_set:
970
+ raise TypeError("string dtypes are not allowed, use 'object' instead")
971
+
972
+
973
+ def coerce_indexer_dtype(indexer, categories) -> np.ndarray:
974
+ """coerce the indexer input array to the smallest dtype possible"""
975
+ length = len(categories)
976
+ if length < _int8_max:
977
+ return ensure_int8(indexer)
978
+ elif length < _int16_max:
979
+ return ensure_int16(indexer)
980
+ elif length < _int32_max:
981
+ return ensure_int32(indexer)
982
+ return ensure_int64(indexer)
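+
+ # Illustrative sketch (not a strict doctest): with only a few categories
+ # the codes fit in int8.
+ # >>> coerce_indexer_dtype(np.array([0, 1, 2]), categories=range(3)).dtype
+ # dtype('int8')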
983
+
984
+
985
+ def convert_dtypes(
986
+ input_array: ArrayLike,
987
+ convert_string: bool = True,
988
+ convert_integer: bool = True,
989
+ convert_boolean: bool = True,
990
+ convert_floating: bool = True,
991
+ infer_objects: bool = False,
992
+ dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable",
993
+ ) -> DtypeObj:
994
+ """
995
+ Convert objects to best possible type, and optionally,
996
+ to types supporting ``pd.NA``.
997
+
998
+ Parameters
999
+ ----------
1000
+ input_array : ExtensionArray or np.ndarray
1001
+ convert_string : bool, default True
1002
+ Whether object dtypes should be converted to ``StringDtype()``.
1003
+ convert_integer : bool, default True
1004
+ Whether, if possible, conversion can be done to integer extension types.
1005
+ convert_boolean : bool, default True
1006
+ Whether object dtypes should be converted to ``BooleanDtypes()``.
1007
+ convert_floating : bool, default True
1008
+ Whether, if possible, conversion can be done to floating extension types.
1009
+ If `convert_integer` is also True, preference will be given to integer
1010
+ dtypes if the floats can be faithfully casted to integers.
1011
+ infer_objects : bool, defaults False
1012
+ Whether to also infer objects to float/int if possible. Is only hit if the
1013
+ object array contains pd.NA.
1014
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
1015
+ Back-end data type applied to the resultant :class:`DataFrame`
1016
+ (still experimental). Behaviour is as follows:
1017
+
1018
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
1019
+ (default).
1020
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
1021
+ DataFrame.
1022
+
1023
+ .. versionadded:: 2.0
1024
+
1025
+ Returns
1026
+ -------
1027
+ np.dtype, or ExtensionDtype
1028
+ """
1029
+ inferred_dtype: str | DtypeObj
1030
+
1031
+ if (
1032
+ convert_string or convert_integer or convert_boolean or convert_floating
1033
+ ) and isinstance(input_array, np.ndarray):
1034
+ if input_array.dtype == object:
1035
+ inferred_dtype = lib.infer_dtype(input_array)
1036
+ else:
1037
+ inferred_dtype = input_array.dtype
1038
+
1039
+ if is_string_dtype(inferred_dtype):
1040
+ if not convert_string or inferred_dtype == "bytes":
1041
+ inferred_dtype = input_array.dtype
1042
+ else:
1043
+ inferred_dtype = pandas_dtype_func("string")
1044
+
1045
+ if convert_integer:
1046
+ target_int_dtype = pandas_dtype_func("Int64")
1047
+
1048
+ if input_array.dtype.kind in "iu":
1049
+ from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE
1050
+
1051
+ inferred_dtype = NUMPY_INT_TO_DTYPE.get(
1052
+ input_array.dtype, target_int_dtype
1053
+ )
1054
+ elif input_array.dtype.kind in "fcb":
1055
+ # TODO: de-dup with maybe_cast_to_integer_array?
1056
+ arr = input_array[notna(input_array)]
1057
+ if (arr.astype(int) == arr).all():
1058
+ inferred_dtype = target_int_dtype
1059
+ else:
1060
+ inferred_dtype = input_array.dtype
1061
+ elif (
1062
+ infer_objects
1063
+ and input_array.dtype == object
1064
+ and (isinstance(inferred_dtype, str) and inferred_dtype == "integer")
1065
+ ):
1066
+ inferred_dtype = target_int_dtype
1067
+
1068
+ if convert_floating:
1069
+ if input_array.dtype.kind in "fcb":
1070
+ # i.e. numeric but not integer
1071
+ from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE
1072
+
1073
+ inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get(
1074
+ input_array.dtype, pandas_dtype_func("Float64")
1075
+ )
1076
+ # if we could also convert to integer, check if all floats
1077
+ # are actually integers
1078
+ if convert_integer:
1079
+ # TODO: de-dup with maybe_cast_to_integer_array?
1080
+ arr = input_array[notna(input_array)]
1081
+ if (arr.astype(int) == arr).all():
1082
+ inferred_dtype = pandas_dtype_func("Int64")
1083
+ else:
1084
+ inferred_dtype = inferred_float_dtype
1085
+ else:
1086
+ inferred_dtype = inferred_float_dtype
1087
+ elif (
1088
+ infer_objects
1089
+ and input_array.dtype == object
1090
+ and (
1091
+ isinstance(inferred_dtype, str)
1092
+ and inferred_dtype == "mixed-integer-float"
1093
+ )
1094
+ ):
1095
+ inferred_dtype = pandas_dtype_func("Float64")
1096
+
1097
+ if convert_boolean:
1098
+ if input_array.dtype.kind == "b":
1099
+ inferred_dtype = pandas_dtype_func("boolean")
1100
+ elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
1101
+ inferred_dtype = pandas_dtype_func("boolean")
1102
+
1103
+ if isinstance(inferred_dtype, str):
1104
+ # If we couldn't do anything else, then we retain the dtype
1105
+ inferred_dtype = input_array.dtype
1106
+
1107
+ else:
1108
+ inferred_dtype = input_array.dtype
1109
+
1110
+ if dtype_backend == "pyarrow":
1111
+ from pandas.core.arrays.arrow.array import to_pyarrow_type
1112
+ from pandas.core.arrays.string_ import StringDtype
1113
+
1114
+ assert not isinstance(inferred_dtype, str)
1115
+
1116
+ if (
1117
+ (convert_integer and inferred_dtype.kind in "iu")
1118
+ or (convert_floating and inferred_dtype.kind in "fc")
1119
+ or (convert_boolean and inferred_dtype.kind == "b")
1120
+ or (convert_string and isinstance(inferred_dtype, StringDtype))
1121
+ or (
1122
+ inferred_dtype.kind not in "iufcb"
1123
+ and not isinstance(inferred_dtype, StringDtype)
1124
+ )
1125
+ ):
1126
+ if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance(
1127
+ inferred_dtype, DatetimeTZDtype
1128
+ ):
1129
+ base_dtype = inferred_dtype.base
1130
+ elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)):
1131
+ base_dtype = inferred_dtype.numpy_dtype
1132
+ elif isinstance(inferred_dtype, StringDtype):
1133
+ base_dtype = np.dtype(str)
1134
+ else:
1135
+ base_dtype = inferred_dtype
1136
+ if (
1137
+ base_dtype.kind == "O" # type: ignore[union-attr]
1138
+ and input_array.size > 0
1139
+ and isna(input_array).all()
1140
+ ):
1141
+ import pyarrow as pa
1142
+
1143
+ pa_type = pa.null()
1144
+ else:
1145
+ pa_type = to_pyarrow_type(base_dtype)
1146
+ if pa_type is not None:
1147
+ inferred_dtype = ArrowDtype(pa_type)
1148
+ elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype):
1149
+ # GH 53648
1150
+ inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype]
1151
+
1152
+ # error: Incompatible return value type (got "Union[str, Union[dtype[Any],
1153
+ # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]")
1154
+ return inferred_dtype # type: ignore[return-value]
1155
+
1156
+
1157
+ def maybe_infer_to_datetimelike(
1158
+ value: npt.NDArray[np.object_],
1159
+ ) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray:
1160
+ """
1161
+ we might have an array (or single object) that is datetime-like;
1162
+ since no dtype is passed, don't change the value unless we find a
1163
+ datetime/timedelta set
1164
+
1165
+ this is pretty strict in that a datetime/timedelta is REQUIRED
1166
+ in addition to possible nulls/string likes
1167
+
1168
+ Parameters
1169
+ ----------
1170
+ value : np.ndarray[object]
1171
+
1172
+ Returns
1173
+ -------
1174
+ np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray
1175
+
1176
+ """
1177
+ if not isinstance(value, np.ndarray) or value.dtype != object:
1178
+ # Caller is responsible for passing only ndarray[object]
1179
+ raise TypeError(type(value)) # pragma: no cover
1180
+ if value.ndim != 1:
1181
+ # Caller is responsible
1182
+ raise ValueError(value.ndim) # pragma: no cover
1183
+
1184
+ if not len(value):
1185
+ return value
1186
+
1187
+ # error: Incompatible return value type (got "Union[ExtensionArray,
1188
+ # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray,
1189
+ # TimedeltaArray, PeriodArray, IntervalArray]")
1190
+ return lib.maybe_convert_objects( # type: ignore[return-value]
1191
+ value,
1192
+ # Here we do not convert numeric dtypes, as if we wanted that,
1193
+ # numpy would have done it for us.
1194
+ convert_numeric=False,
1195
+ convert_non_numeric=True,
1196
+ dtype_if_all_nat=np.dtype("M8[ns]"),
1197
+ )
1198
+
1199
+
1200
+ def maybe_cast_to_datetime(
1201
+ value: np.ndarray | list, dtype: np.dtype
1202
+ ) -> ExtensionArray | np.ndarray:
1203
+ """
1204
+ try to cast the array/value to a datetimelike dtype, converting float
1205
+ nan to iNaT
1206
+
1207
+ Caller is responsible for handling ExtensionDtype cases and non dt64/td64
1208
+ cases.
1209
+ """
1210
+ from pandas.core.arrays.datetimes import DatetimeArray
1211
+ from pandas.core.arrays.timedeltas import TimedeltaArray
1212
+
1213
+ assert dtype.kind in "mM"
1214
+ if not is_list_like(value):
1215
+ raise TypeError("value must be listlike")
1216
+
1217
+ # TODO: _from_sequence would raise ValueError in cases where
1218
+ # _ensure_nanosecond_dtype raises TypeError
1219
+ _ensure_nanosecond_dtype(dtype)
1220
+
1221
+ if lib.is_np_dtype(dtype, "m"):
1222
+ res = TimedeltaArray._from_sequence(value, dtype=dtype)
1223
+ return res
1224
+ else:
1225
+ try:
1226
+ dta = DatetimeArray._from_sequence(value, dtype=dtype)
1227
+ except ValueError as err:
1228
+ # We can give a Series-specific exception message.
1229
+ if "cannot supply both a tz and a timezone-naive dtype" in str(err):
1230
+ raise ValueError(
1231
+ "Cannot convert timezone-aware data to "
1232
+ "timezone-naive dtype. Use "
1233
+ "pd.Series(values).dt.tz_localize(None) instead."
1234
+ ) from err
1235
+ raise
1236
+
1237
+ return dta
1238
+
1239
+
1240
+ def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None:
1241
+ """
1242
+ Convert dtypes with granularity less than nanosecond to nanosecond
1243
+
1244
+ >>> _ensure_nanosecond_dtype(np.dtype("M8[us]"))
1245
+
1246
+ >>> _ensure_nanosecond_dtype(np.dtype("M8[D]"))
1247
+ Traceback (most recent call last):
1248
+ ...
1249
+ TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'
1250
+
1251
+ >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]"))
1252
+ Traceback (most recent call last):
1253
+ ...
1254
+ TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'
1255
+ """ # noqa: E501
1256
+ msg = (
1257
+ f"The '{dtype.name}' dtype has no unit. "
1258
+ f"Please pass in '{dtype.name}[ns]' instead."
1259
+ )
1260
+
1261
+ # unpack e.g. SparseDtype
1262
+ dtype = getattr(dtype, "subtype", dtype)
1263
+
1264
+ if not isinstance(dtype, np.dtype):
1265
+ # i.e. datetime64tz
1266
+ pass
1267
+
1268
+ elif dtype.kind in "mM":
1269
+ if not is_supported_dtype(dtype):
1270
+ # pre-2.0 we would silently swap in nanos for lower-resolutions,
1271
+ # raise for above-nano resolutions
1272
+ if dtype.name in ["datetime64", "timedelta64"]:
1273
+ raise ValueError(msg)
1274
+ # TODO: ValueError or TypeError? existing test
1275
+ # test_constructor_generic_timestamp_bad_frequency expects TypeError
1276
+ raise TypeError(
1277
+ f"dtype={dtype} is not supported. Supported resolutions are 's', "
1278
+ "'ms', 'us', and 'ns'"
1279
+ )
1280
+
1281
+
1282
+ # TODO: other value-dependent functions to standardize here include
1283
+ # Index._find_common_type_compat
1284
+ def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj:
1285
+ """
1286
+ Find the type/dtype for the result of an operation between objects.
1287
+
1288
+ This is similar to find_common_type, but looks at the right object instead
1289
+ of just its dtype. This can be useful in particular when the right
1290
+ object does not have a `dtype`.
1291
+
1292
+ Parameters
1293
+ ----------
1294
+ left_dtype : np.dtype or ExtensionDtype
1295
+ right : Any
1296
+
1297
+ Returns
1298
+ -------
1299
+ np.dtype or ExtensionDtype
1300
+
1301
+ See also
1302
+ --------
1303
+ find_common_type
1304
+ numpy.result_type
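+
+ Examples
+ --------
+ Sketch of the value-dependent integer path (a Python int that
+ overflows int8 promotes only as far as needed):
+
+ >>> find_result_type(np.dtype("int8"), 512)
+ dtype('int16')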
1305
+ """
1306
+ new_dtype: DtypeObj
1307
+
1308
+ if (
1309
+ isinstance(left_dtype, np.dtype)
1310
+ and left_dtype.kind in "iuc"
1311
+ and (lib.is_integer(right) or lib.is_float(right))
1312
+ ):
1313
+ # e.g. with int8 dtype and right=512, we want to end up with
1314
+ # np.int16, whereas infer_dtype_from(512) gives np.int64,
1315
+ # which will make us upcast too far.
1316
+ if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f":
1317
+ right = int(right)
1318
+ # After NEP 50, numpy won't inspect Python scalars
1319
+ # TODO: do we need to recreate numpy's inspection logic for floats too
1320
+ # (this breaks some tests)
1321
+ if isinstance(right, int) and not isinstance(right, np.integer):
1322
+ # This gives an unsigned type by default
1323
+ # (if our number is positive)
1324
+
1325
+ # If our left dtype is signed, we might not want this since
1326
+ # this might give us 1 dtype too big
1327
+ # We should check if the corresponding int dtype (e.g. int64 for uint64)
1328
+ # can hold the number
1329
+ right_dtype = np.min_scalar_type(right)
1330
+ if right == 0:
1331
+ # Special case 0
1332
+ right = left_dtype
1333
+ elif (
1334
+ not np.issubdtype(left_dtype, np.unsignedinteger)
1335
+ and 0 < right <= np.iinfo(right_dtype).max
1336
+ ):
1337
+ # If left dtype isn't unsigned, check if it fits in the signed dtype
1338
+ right = np.dtype(f"i{right_dtype.itemsize}")
1339
+ else:
1340
+ right = right_dtype
1341
+
1342
+ new_dtype = np.result_type(left_dtype, right)
1343
+
1344
+ elif is_valid_na_for_dtype(right, left_dtype):
1345
+ # e.g. IntervalDtype[int] and None/np.nan
1346
+ new_dtype = ensure_dtype_can_hold_na(left_dtype)
1347
+
1348
+ else:
1349
+ dtype, _ = infer_dtype_from(right)
1350
+ new_dtype = find_common_type([left_dtype, dtype])
1351
+
1352
+ return new_dtype
1353
+
1354
+
1355
+ def common_dtype_categorical_compat(
1356
+ objs: Sequence[Index | ArrayLike], dtype: DtypeObj
1357
+ ) -> DtypeObj:
1358
+ """
1359
+ Update the result of find_common_type to account for NAs in a Categorical.
1360
+
1361
+ Parameters
1362
+ ----------
1363
+ objs : list[np.ndarray | ExtensionArray | Index]
1364
+ dtype : np.dtype or ExtensionDtype
1365
+
1366
+ Returns
1367
+ -------
1368
+ np.dtype or ExtensionDtype
1369
+ """
1370
+ # GH#38240
1371
+
1372
+ # TODO: more generally, could do `not can_hold_na(dtype)`
1373
+ if lib.is_np_dtype(dtype, "iu"):
1374
+ for obj in objs:
1375
+ # We don't want to accidentally allow e.g. "categorical" str here
1376
+ obj_dtype = getattr(obj, "dtype", None)
1377
+ if isinstance(obj_dtype, CategoricalDtype):
1378
+ if isinstance(obj, ABCIndex):
1379
+ # This check may already be cached
1380
+ hasnas = obj.hasnans
1381
+ else:
1382
+ # Categorical
1383
+ hasnas = cast("Categorical", obj)._hasna
1384
+
1385
+ if hasnas:
1386
+ # see test_union_int_categorical_with_nan
1387
+ dtype = np.dtype(np.float64)
1388
+ break
1389
+ return dtype
1390
+
1391
+
1392
+ def np_find_common_type(*dtypes: np.dtype) -> np.dtype:
1393
+ """
1394
+ np.find_common_type implementation pre-1.25 deprecation using np.result_type
1395
+ https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065
1396
+
1397
+ Parameters
1398
+ ----------
1399
+ dtypes : np.dtypes
1400
+
1401
+ Returns
1402
+ -------
1403
+ np.dtype
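+
+ Examples
+ --------
+ Illustrative:
+
+ >>> np_find_common_type(np.dtype("int64"), np.dtype("float32"))
+ dtype('float64')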
1404
+ """
1405
+ try:
1406
+ common_dtype = np.result_type(*dtypes)
1407
+ if common_dtype.kind in "mMSU":
1408
+ # NumPy promotion currently (1.25) misbehaves for times and strings,
1409
+ # so fall back to object (find_common_dtype did unless there
1410
+ # was only one dtype)
1411
+ common_dtype = np.dtype("O")
1412
+
1413
+ except TypeError:
1414
+ common_dtype = np.dtype("O")
1415
+ return common_dtype
1416
+
1417
+
1418
+ @overload
1419
+ def find_common_type(types: list[np.dtype]) -> np.dtype:
1420
+ ...
1421
+
1422
+
1423
+ @overload
1424
+ def find_common_type(types: list[ExtensionDtype]) -> DtypeObj:
1425
+ ...
1426
+
1427
+
1428
+ @overload
1429
+ def find_common_type(types: list[DtypeObj]) -> DtypeObj:
1430
+ ...
1431
+
1432
+
1433
+ def find_common_type(types):
1434
+ """
1435
+ Find a common data type among the given dtypes.
1436
+
1437
+ Parameters
1438
+ ----------
1439
+ types : list of dtypes
1440
+
1441
+ Returns
1442
+ -------
1443
+ pandas extension or numpy dtype
1444
+
1445
+ See Also
1446
+ --------
1447
+ numpy.find_common_type
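+
+ Examples
+ --------
+ A minimal illustration (numpy dtypes only):
+
+ >>> find_common_type([np.dtype(np.int64), np.dtype(np.float32)])
+ dtype('float64')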
1448
+
1449
+ """
1450
+ if not types:
1451
+ raise ValueError("no types given")
1452
+
1453
+ first = types[0]
1454
+
1455
+ # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
1456
+ # => object
1457
+ if lib.dtypes_all_equal(list(types)):
1458
+ return first
1459
+
1460
+ # get unique types (dict.fromkeys is used as order-preserving set())
1461
+ types = list(dict.fromkeys(types).keys())
1462
+
1463
+ if any(isinstance(t, ExtensionDtype) for t in types):
1464
+ for t in types:
1465
+ if isinstance(t, ExtensionDtype):
1466
+ res = t._get_common_dtype(types)
1467
+ if res is not None:
1468
+ return res
1469
+ return np.dtype("object")
1470
+
1471
+ # take lowest unit
1472
+ if all(lib.is_np_dtype(t, "M") for t in types):
1473
+ return np.dtype(max(types))
1474
+ if all(lib.is_np_dtype(t, "m") for t in types):
1475
+ return np.dtype(max(types))
1476
+
1477
+ # don't mix bool / int or float or complex
1478
+ # this is different from numpy, which casts bool with float/int as int
1479
+ has_bools = any(t.kind == "b" for t in types)
1480
+ if has_bools:
1481
+ for t in types:
1482
+ if t.kind in "iufc":
1483
+ return np.dtype("object")
1484
+
1485
+ return np_find_common_type(*types)
1486
+
1487
+
1488
+ def construct_2d_arraylike_from_scalar(
1489
+ value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool
1490
+ ) -> np.ndarray:
1491
+ shape = (length, width)
1492
+
1493
+ if dtype.kind in "mM":
1494
+ value = _maybe_box_and_unbox_datetimelike(value, dtype)
1495
+ elif dtype == _dtype_obj:
1496
+ if isinstance(value, (np.timedelta64, np.datetime64)):
1497
+ # calling np.array below would cast to pytimedelta/pydatetime
1498
+ out = np.empty(shape, dtype=object)
1499
+ out.fill(value)
1500
+ return out
1501
+
1502
+ # Attempt to coerce to a numpy array
1503
+ try:
1504
+ if not copy:
1505
+ arr = np.asarray(value, dtype=dtype)
1506
+ else:
1507
+ arr = np.array(value, dtype=dtype, copy=copy)
1508
+ except (ValueError, TypeError) as err:
1509
+ raise TypeError(
1510
+ f"DataFrame constructor called with incompatible data and dtype: {err}"
1511
+ ) from err
1512
+
1513
+ if arr.ndim != 0:
1514
+ raise ValueError("DataFrame constructor not properly called!")
1515
+
1516
+ return np.full(shape, arr)
1517
+
1518
+
1519
+ def construct_1d_arraylike_from_scalar(
1520
+ value: Scalar, length: int, dtype: DtypeObj | None
1521
+ ) -> ArrayLike:
1522
+ """
1523
+ create a np.ndarray / pandas type of specified shape and dtype
1524
+ filled with values
1525
+
1526
+ Parameters
1527
+ ----------
1528
+ value : scalar value
1529
+ length : int
1530
+ dtype : pandas_dtype or np.dtype
1531
+
1532
+ Returns
1533
+ -------
1534
+ np.ndarray / pandas type of length, filled with value
1535
+
1536
+ """
1537
+
1538
+ if dtype is None:
1539
+ try:
1540
+ dtype, value = infer_dtype_from_scalar(value)
1541
+ except OutOfBoundsDatetime:
1542
+ dtype = _dtype_obj
1543
+
1544
+ if isinstance(dtype, ExtensionDtype):
1545
+ cls = dtype.construct_array_type()
1546
+ seq = [] if length == 0 else [value]
1547
+ subarr = cls._from_sequence(seq, dtype=dtype).repeat(length)
1548
+
1549
+ else:
1550
+ if length and dtype.kind in "iu" and isna(value):
1551
+ # coerce if we have nan for an integer dtype
1552
+ dtype = np.dtype("float64")
1553
+ elif lib.is_np_dtype(dtype, "US"):
1554
+ # we need to coerce to object dtype
1555
+ # to allow numpy to take our string as a scalar value
1556
+ dtype = np.dtype("object")
1557
+ if not isna(value):
1558
+ value = ensure_str(value)
1559
+ elif dtype.kind in "mM":
1560
+ value = _maybe_box_and_unbox_datetimelike(value, dtype)
1561
+
1562
+ subarr = np.empty(length, dtype=dtype)
1563
+ if length:
1564
+ # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes
1565
+ subarr.fill(value)
1566
+
1567
+ return subarr
1568
+
1569
+
1570
+ def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj):
1571
+ # Caller is responsible for checking dtype.kind in "mM"
1572
+
1573
+ if isinstance(value, dt.datetime):
1574
+ # we dont want to box dt64, in particular datetime64("NaT")
1575
+ value = maybe_box_datetimelike(value, dtype)
1576
+
1577
+ return _maybe_unbox_datetimelike(value, dtype)
1578
+
1579
+
1580
+ def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray:
1581
+ """
1582
+ Transform any list-like object in a 1-dimensional numpy array of object
1583
+ dtype.
1584
+
1585
+ Parameters
1586
+ ----------
1587
+ values : any iterable which has a len()
1588
+
1589
+ Raises
1590
+ ------
1591
+ TypeError
1592
+ * If `values` does not have a len()
1593
+
1594
+ Returns
1595
+ -------
1596
+ 1-dimensional numpy array of dtype object
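+
+ Examples
+ --------
+ Sketch: tuples stay intact instead of becoming a 2-D array:
+
+ >>> construct_1d_object_array_from_listlike([(1, 2), (3, 4)])
+ array([(1, 2), (3, 4)], dtype=object)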
1597
+ """
1598
+ # numpy will try to interpret nested lists as further dimensions, hence
1599
+ # making a 1D array that contains list-likes is a bit tricky:
1600
+ result = np.empty(len(values), dtype="object")
1601
+ result[:] = values
1602
+ return result
1603
+
1604
+
1605
+ def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray:
1606
+ """
1607
+ Takes any dtype and returns the casted version, raising when the data is
1608
+ incompatible with integer/unsigned integer dtypes.
1609
+
1610
+ Parameters
1611
+ ----------
1612
+ arr : np.ndarray or list
1613
+ The array to cast.
1614
+ dtype : np.dtype
1615
+ The integer dtype to cast the array to.
1616
+
1617
+ Returns
1618
+ -------
1619
+ ndarray
1620
+ Array of integer or unsigned integer dtype.
1621
+
1622
+ Raises
1623
+ ------
1624
+ OverflowError : the dtype is incompatible with the data
1625
+ ValueError : loss of precision has occurred during casting
1626
+
1627
+ Examples
1628
+ --------
1629
+ If you try to coerce negative values to unsigned integers, it raises:
1630
+
1631
+ >>> pd.Series([-1], dtype="uint64")
1632
+ Traceback (most recent call last):
1633
+ ...
1634
+ OverflowError: Trying to coerce negative values to unsigned integers
1635
+
1636
+ Also, if you try to coerce float values to integers, it raises:
1637
+
1638
+ >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64"))
1639
+ Traceback (most recent call last):
1640
+ ...
1641
+ ValueError: Trying to coerce float values to integers
1642
+ """
1643
+ assert dtype.kind in "iu"
1644
+
1645
+ try:
1646
+ if not isinstance(arr, np.ndarray):
1647
+ with warnings.catch_warnings():
1648
+ # We already disallow dtype=uint w/ negative numbers
1649
+ # (test_constructor_coercion_signed_to_unsigned) so safe to ignore.
1650
+ if not np_version_gt2:
1651
+ warnings.filterwarnings(
1652
+ "ignore",
1653
+ "NumPy will stop allowing conversion of "
1654
+ "out-of-bound Python int",
1655
+ DeprecationWarning,
1656
+ )
1657
+ casted = np.asarray(arr, dtype=dtype)
1658
+ else:
1659
+ with warnings.catch_warnings():
1660
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
1661
+ casted = arr.astype(dtype, copy=False)
1662
+ except OverflowError as err:
1663
+ raise OverflowError(
1664
+ "The elements provided in the data cannot all be "
1665
+ f"casted to the dtype {dtype}"
1666
+ ) from err
1667
+
1668
+ if isinstance(arr, np.ndarray) and arr.dtype == dtype:
1669
+ # avoid expensive array_equal check
1670
+ return casted
1671
+
1672
+ with warnings.catch_warnings():
1673
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
1674
+ warnings.filterwarnings(
1675
+ "ignore", "elementwise comparison failed", FutureWarning
1676
+ )
1677
+ if np.array_equal(arr, casted):
1678
+ return casted
1679
+
1680
+ # We do this casting to allow for proper
1681
+ # data and dtype checking.
1682
+ #
1683
+ # We didn't do this earlier because NumPy
1684
+ # doesn't handle `uint64` correctly.
1685
+ arr = np.asarray(arr)
1686
+
1687
+ if np.issubdtype(arr.dtype, str):
1688
+ # TODO(numpy-2.0 min): This case will raise an OverflowError above
1689
+ if (casted.astype(str) == arr).all():
1690
+ return casted
1691
+ raise ValueError(f"string values cannot be losslessly cast to {dtype}")
1692
+
1693
+ if dtype.kind == "u" and (arr < 0).any():
1694
+ # TODO: can this be hit anymore after numpy 2.0?
1695
+ raise OverflowError("Trying to coerce negative values to unsigned integers")
1696
+
1697
+ if arr.dtype.kind == "f":
1698
+ if not np.isfinite(arr).all():
1699
+ raise IntCastingNaNError(
1700
+ "Cannot convert non-finite values (NA or inf) to integer"
1701
+ )
1702
+ raise ValueError("Trying to coerce float values to integers")
1703
+ if arr.dtype == object:
1704
+ raise ValueError("Trying to coerce float values to integers")
1705
+
1706
+ if casted.dtype < arr.dtype:
1707
+ # TODO: Can this path be hit anymore with numpy > 2
1708
+ # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows
1709
+ raise ValueError(
1710
+ f"Values are too large to be losslessly converted to {dtype}. "
1711
+ f"To cast anyway, use pd.Series(values).astype({dtype})"
1712
+ )
1713
+
1714
+ if arr.dtype.kind in "mM":
1715
+ # test_constructor_maskedarray_nonfloat
1716
+ raise TypeError(
1717
+ f"Constructing a Series or DataFrame from {arr.dtype} values and "
1718
+ f"dtype={dtype} is not supported. Use values.view({dtype}) instead."
1719
+ )
1720
+
1721
+ # No known cases that get here, but raising explicitly to cover our bases.
1722
+ raise ValueError(f"values cannot be losslessly cast to {dtype}")
1723
+
1724
+
1725
+ def can_hold_element(arr: ArrayLike, element: Any) -> bool:
1726
+ """
1727
+ Can we do an inplace setitem with this element in an array with this dtype?
1728
+
1729
+ Parameters
1730
+ ----------
1731
+ arr : np.ndarray or ExtensionArray
1732
+ element : Any
1733
+
1734
+ Returns
1735
+ -------
1736
+ bool
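+
+ Examples
+ --------
+ Illustrative, with a plain int64 ndarray:
+
+ >>> can_hold_element(np.array([1, 2], dtype=np.int64), 3)
+ True
+ >>> can_hold_element(np.array([1, 2], dtype=np.int64), 3.5)
+ False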
1737
+ """
1738
+ dtype = arr.dtype
1739
+ if not isinstance(dtype, np.dtype) or dtype.kind in "mM":
1740
+ if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)):
1741
+ # np.dtype here catches datetime64ns and timedelta64ns; we assume
1742
+ # in this case that we have DatetimeArray/TimedeltaArray
1743
+ arr = cast(
1744
+ "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr
1745
+ )
1746
+ try:
1747
+ arr._validate_setitem_value(element)
1748
+ return True
1749
+ except (ValueError, TypeError):
1750
+ return False
1751
+
1752
+ # This is technically incorrect, but maintains the behavior of
1753
+ # ExtensionBlock._can_hold_element
1754
+ return True
1755
+
1756
+ try:
1757
+ np_can_hold_element(dtype, element)
1758
+ return True
1759
+ except (TypeError, LossySetitemError):
1760
+ return False
1761
+
1762
+
1763
+ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any:
1764
+ """
1765
+ Raise if we cannot losslessly set this element into an ndarray with this dtype.
1766
+
1767
+ Specifically about places where we disagree with numpy. i.e. there are
1768
+ cases where numpy will raise in doing the setitem that we do not check
1769
+ for here, e.g. setting str "X" into a numeric ndarray.
1770
+
1771
+ Returns
1772
+ -------
1773
+ Any
1774
+ The element, potentially cast to the dtype.
1775
+
1776
+ Raises
1777
+ ------
1778
+ ValueError : If we cannot losslessly store this element with this dtype.
1779
+ """
1780
+ if dtype == _dtype_obj:
1781
+ return element
1782
+
1783
+ tipo = _maybe_infer_dtype_type(element)
1784
+
1785
+ if dtype.kind in "iu":
1786
+ if isinstance(element, range):
1787
+ if _dtype_can_hold_range(element, dtype):
1788
+ return element
1789
+ raise LossySetitemError
1790
+
1791
+ if is_integer(element) or (is_float(element) and element.is_integer()):
1792
+ # e.g. test_setitem_series_int8 if we have a python int 1
1793
+ # tipo may be np.int32, despite the fact that it will fit
1794
+ # in smaller int dtypes.
1795
+ info = np.iinfo(dtype)
1796
+ if info.min <= element <= info.max:
1797
+ return dtype.type(element)
1798
+ raise LossySetitemError
1799
+
1800
+ if tipo is not None:
1801
+ if tipo.kind not in "iu":
1802
+ if isinstance(element, np.ndarray) and element.dtype.kind == "f":
1803
+ # If all can be losslessly cast to integers, then we can hold them
1804
+ with np.errstate(invalid="ignore"):
1805
+ # We check afterwards if cast was losslessly, so no need to show
1806
+ # the warning
1807
+ casted = element.astype(dtype)
1808
+ comp = casted == element
1809
+ if comp.all():
1810
+ # Return the casted values bc they can be passed to
1811
+ # np.putmask, whereas the raw values cannot.
1812
+ # see TestSetitemFloatNDarrayIntoIntegerSeries
1813
+ return casted
1814
+ raise LossySetitemError
1815
+
1816
+ elif isinstance(element, ABCExtensionArray) and isinstance(
1817
+ element.dtype, CategoricalDtype
1818
+ ):
1819
+ # GH#52927 setting Categorical value into non-EA frame
1820
+ # TODO: general-case for EAs?
1821
+ try:
1822
+ casted = element.astype(dtype)
1823
+ except (ValueError, TypeError):
1824
+ raise LossySetitemError
1825
+ # Check for cases of either
1826
+ # a) lossy overflow/rounding or
1827
+ # b) semantic changes like dt64->int64
1828
+ comp = casted == element
1829
+ if not comp.all():
1830
+ raise LossySetitemError
1831
+ return casted
1832
+
1833
+ # Anything other than integer we cannot hold
1834
+ raise LossySetitemError
1835
+ if (
1836
+ dtype.kind == "u"
1837
+ and isinstance(element, np.ndarray)
1838
+ and element.dtype.kind == "i"
1839
+ ):
1840
+ # see test_where_uint64
1841
+ casted = element.astype(dtype)
1842
+ if (casted == element).all():
1843
+ # TODO: faster to check (element >=0).all()? potential
1844
+ # itemsize issues there?
1845
+ return casted
1846
+ raise LossySetitemError
1847
+ if dtype.itemsize < tipo.itemsize:
1848
+ raise LossySetitemError
1849
+ if not isinstance(tipo, np.dtype):
1850
+ # i.e. nullable IntegerDtype; we can put this into an ndarray
1851
+ # losslessly iff it has no NAs
1852
+ arr = element._values if isinstance(element, ABCSeries) else element
1853
+ if arr._hasna:
1854
+ raise LossySetitemError
1855
+ return element
1856
+
1857
+ return element
1858
+
1859
+ raise LossySetitemError
1860
+
1861
+ if dtype.kind == "f":
1862
+ if lib.is_integer(element) or lib.is_float(element):
1863
+ casted = dtype.type(element)
1864
+ if np.isnan(casted) or casted == element:
1865
+ return casted
1866
+ # otherwise e.g. overflow see TestCoercionFloat32
1867
+ raise LossySetitemError
1868
+
1869
+ if tipo is not None:
1870
+ # TODO: itemsize check?
1871
+ if tipo.kind not in "iuf":
1872
+ # Anything other than float/integer we cannot hold
1873
+ raise LossySetitemError
1874
+ if not isinstance(tipo, np.dtype):
1875
+ # i.e. nullable IntegerDtype or FloatingDtype;
1876
+ # we can put this into an ndarray losslessly iff it has no NAs
1877
+ if element._hasna:
1878
+ raise LossySetitemError
1879
+ return element
1880
+ elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind:
1881
+ if isinstance(element, np.ndarray):
1882
+ # e.g. TestDataFrameIndexingWhere::test_where_alignment
1883
+ casted = element.astype(dtype)
1884
+ if np.array_equal(casted, element, equal_nan=True):
1885
+ return casted
1886
+ raise LossySetitemError
1887
+
1888
+ return element
1889
+
1890
+ raise LossySetitemError
1891
+
1892
+ if dtype.kind == "c":
1893
+ if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element):
1894
+ if np.isnan(element):
1895
+ # see test_where_complex GH#6345
1896
+ return dtype.type(element)
1897
+
1898
+ with warnings.catch_warnings():
1899
+ warnings.filterwarnings("ignore")
1900
+ casted = dtype.type(element)
1901
+ if casted == element:
1902
+ return casted
1903
+ # otherwise e.g. overflow see test_32878_complex_itemsize
1904
+ raise LossySetitemError
1905
+
1906
+ if tipo is not None:
1907
+ if tipo.kind in "iufc":
1908
+ return element
1909
+ raise LossySetitemError
1910
+ raise LossySetitemError
1911
+
1912
+ if dtype.kind == "b":
1913
+ if tipo is not None:
1914
+ if tipo.kind == "b":
1915
+ if not isinstance(tipo, np.dtype):
1916
+ # i.e. we have a BooleanArray
1917
+ if element._hasna:
1918
+ # i.e. there are pd.NA elements
1919
+ raise LossySetitemError
1920
+ return element
1921
+ raise LossySetitemError
1922
+ if lib.is_bool(element):
1923
+ return element
1924
+ raise LossySetitemError
1925
+
1926
+ if dtype.kind == "S":
1927
+ # TODO: test tests.frame.methods.test_replace tests get here,
1928
+ # need more targeted tests. xref phofl has a PR about this
1929
+ if tipo is not None:
1930
+ if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize:
1931
+ return element
1932
+ raise LossySetitemError
1933
+ if isinstance(element, bytes) and len(element) <= dtype.itemsize:
1934
+ return element
1935
+ raise LossySetitemError
1936
+
1937
+ if dtype.kind == "V":
1938
+ # i.e. np.void, which cannot hold _anything_
1939
+ raise LossySetitemError
1940
+
1941
+ raise NotImplementedError(dtype)
1942
+
1943
+
1944
+ def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:
1945
+ """
1946
+ _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints),
1947
+ but in many cases a range can be held by a smaller integer dtype.
1948
+ Check if this is one of those cases.
1949
+ """
1950
+ if not len(rng):
1951
+ return True
1952
+ return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype)
1953
+
1954
+
1955
+ def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool:
1956
+ """
1957
+ np.can_cast pandas-equivalent for pre 2-0 behavior that allowed scalar
1958
+ inference
1959
+
1960
+ Parameters
1961
+ ----------
1962
+ element : Scalar
1963
+ dtype : np.dtype
1964
+
1965
+ Returns
1966
+ -------
1967
+ bool
1968
+ """
1969
+ try:
1970
+ np_can_hold_element(dtype, element)
1971
+ return True
1972
+ except (LossySetitemError, NotImplementedError):
1973
+ return False
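+
+ # Illustrative sketch (not a strict doctest): a Python int that fits in
+ # int8 can be cast, an overflowing one cannot.
+ # >>> np_can_cast_scalar(1, np.dtype("int8"))
+ # True
+ # >>> np_can_cast_scalar(1000, np.dtype("int8"))
+ # False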
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/common.py ADDED
@@ -0,0 +1,1748 @@
+ """
+ Common type operations.
+ """
+ from __future__ import annotations
+ 
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+ )
+ import warnings
+ 
+ import numpy as np
+ 
+ from pandas._libs import (
+     Interval,
+     Period,
+     algos,
+     lib,
+ )
+ from pandas._libs.tslibs import conversion
+ from pandas.util._exceptions import find_stack_level
+ 
+ from pandas.core.dtypes.base import _registry as registry
+ from pandas.core.dtypes.dtypes import (
+     CategoricalDtype,
+     DatetimeTZDtype,
+     ExtensionDtype,
+     IntervalDtype,
+     PeriodDtype,
+     SparseDtype,
+ )
+ from pandas.core.dtypes.generic import ABCIndex
+ from pandas.core.dtypes.inference import (
+     is_array_like,
+     is_bool,
+     is_complex,
+     is_dataclass,
+     is_decimal,
+     is_dict_like,
+     is_file_like,
+     is_float,
+     is_hashable,
+     is_integer,
+     is_interval,
+     is_iterator,
+     is_list_like,
+     is_named_tuple,
+     is_nested_list_like,
+     is_number,
+     is_re,
+     is_re_compilable,
+     is_scalar,
+     is_sequence,
+ )
+ 
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         ArrayLike,
+         DtypeObj,
+     )
+ 
+ DT64NS_DTYPE = conversion.DT64NS_DTYPE
+ TD64NS_DTYPE = conversion.TD64NS_DTYPE
+ INT64_DTYPE = np.dtype(np.int64)
+ 
+ # oh the troubles to reduce import time
+ _is_scipy_sparse = None
+ 
+ ensure_float64 = algos.ensure_float64
+ ensure_int64 = algos.ensure_int64
+ ensure_int32 = algos.ensure_int32
+ ensure_int16 = algos.ensure_int16
+ ensure_int8 = algos.ensure_int8
+ ensure_platform_int = algos.ensure_platform_int
+ ensure_object = algos.ensure_object
+ ensure_uint64 = algos.ensure_uint64
+ 
+ 
+ def ensure_str(value: bytes | Any) -> str:
+     """
+     Ensure that bytes and non-strings get converted into ``str`` objects.
+     """
+     if isinstance(value, bytes):
+         value = value.decode("utf-8")
+     elif not isinstance(value, str):
+         value = str(value)
+     return value
+ 
+ 
+ def ensure_python_int(value: int | np.integer) -> int:
+     """
+     Ensure that a value is a python int.
+ 
+     Parameters
+     ----------
+     value: int or numpy.integer
+ 
+     Returns
+     -------
+     int
+ 
+     Raises
+     ------
+     TypeError: if the value isn't an int or can't be converted to one.
+     """
+     if not (is_integer(value) or is_float(value)):
+         if not is_scalar(value):
+             raise TypeError(
+                 f"Value needs to be a scalar value, was type {type(value).__name__}"
+             )
+         raise TypeError(f"Wrong type {type(value)} for value {value}")
+     try:
+         new_value = int(value)
+         assert new_value == value
+     except (TypeError, ValueError, AssertionError) as err:
+         raise TypeError(f"Wrong type {type(value)} for value {value}") from err
+     return new_value
+ 
+ 
+ def classes(*klasses) -> Callable:
+     """Evaluate if the tipo is a subclass of the klasses."""
+     return lambda tipo: issubclass(tipo, klasses)
+ 
+ 
+ def _classes_and_not_datetimelike(*klasses) -> Callable:
+     """
+     Evaluate if the tipo is a subclass of the klasses
+     and not a datetimelike.
+     """
+     return lambda tipo: (
+         issubclass(tipo, klasses)
+         and not issubclass(tipo, (np.datetime64, np.timedelta64))
+     )
+ 
+ 
+ def is_object_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether an array-like or dtype is of the object dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array-like or dtype is of the object dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_object_dtype
+     >>> is_object_dtype(object)
+     True
+     >>> is_object_dtype(int)
+     False
+     >>> is_object_dtype(np.array([], dtype=object))
+     True
+     >>> is_object_dtype(np.array([], dtype=int))
+     False
+     >>> is_object_dtype([1, 2, 3])
+     False
+     """
+     return _is_dtype_type(arr_or_dtype, classes(np.object_))
+ 
+ 
+ def is_sparse(arr) -> bool:
+     """
+     Check whether an array-like is a 1-D pandas sparse array.
+ 
+     .. deprecated:: 2.1.0
+         Use isinstance(dtype, pd.SparseDtype) instead.
+ 
+     Check that the one-dimensional array-like is a pandas sparse array.
+     Returns True if it is a pandas sparse array, not another type of
+     sparse array.
+ 
+     Parameters
+     ----------
+     arr : array-like
+         Array-like to check.
+ 
+     Returns
+     -------
+     bool
+         Whether or not the array-like is a pandas sparse array.
+ 
+     Examples
+     --------
+     Returns `True` if the parameter is a 1-D pandas sparse array.
+ 
+     >>> from pandas.api.types import is_sparse
+     >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0]))
+     True
+     >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0])))
+     True
+ 
+     Returns `False` if the parameter is not sparse.
+ 
+     >>> is_sparse(np.array([0, 0, 1, 0]))
+     False
+     >>> is_sparse(pd.Series([0, 1, 0, 0]))
+     False
+ 
+     Returns `False` if the parameter is not a pandas sparse array.
+ 
+     >>> from scipy.sparse import bsr_matrix
+     >>> is_sparse(bsr_matrix([0, 1, 0, 0]))
+     False
+ 
+     Returns `False` if the parameter has more than one dimension.
+     """
+     warnings.warn(
+         "is_sparse is deprecated and will be removed in a future "
+         "version. Check `isinstance(dtype, pd.SparseDtype)` instead.",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+ 
+     dtype = getattr(arr, "dtype", arr)
+     return isinstance(dtype, SparseDtype)
+ 
+ 
+ def is_scipy_sparse(arr) -> bool:
+     """
+     Check whether an array-like is a scipy.sparse.spmatrix instance.
+ 
+     Parameters
+     ----------
+     arr : array-like
+         The array-like to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array-like is a scipy.sparse.spmatrix instance.
+ 
+     Notes
+     -----
+     If scipy is not installed, this function will always return False.
+ 
+     Examples
+     --------
+     >>> from scipy.sparse import bsr_matrix
+     >>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
+     True
+     >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3]))
+     False
+     """
+     global _is_scipy_sparse
+ 
+     if _is_scipy_sparse is None:  # pylint: disable=used-before-assignment
+         try:
+             from scipy.sparse import issparse as _is_scipy_sparse
+         except ImportError:
+             _is_scipy_sparse = lambda _: False
+ 
+     assert _is_scipy_sparse is not None
+     return _is_scipy_sparse(arr)
+ 
+ 
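The `_is_scipy_sparse` module global above is a lazy-import cache: scipy is only imported on first call, which keeps `import pandas` cheap and the dependency optional. A standalone sketch of the same pattern (names here are illustrative, not pandas API):

    _sparse_check = None  # cached callable, populated on first call

    def is_third_party_sparse(obj) -> bool:
        # Defer the optional import until the feature is actually used.
        global _sparse_check
        if _sparse_check is None:
            try:
                from scipy.sparse import issparse as _sparse_check
            except ImportError:
                _sparse_check = lambda _: False  # scipy absent: always False
        return _sparse_check(obj)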
+ def is_datetime64_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether an array-like or dtype is of the datetime64 dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array-like or dtype is of the datetime64 dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_datetime64_dtype
+     >>> is_datetime64_dtype(object)
+     False
+     >>> is_datetime64_dtype(np.datetime64)
+     True
+     >>> is_datetime64_dtype(np.array([], dtype=int))
+     False
+     >>> is_datetime64_dtype(np.array([], dtype=np.datetime64))
+     True
+     >>> is_datetime64_dtype([1, 2, 3])
+     False
+     """
+     if isinstance(arr_or_dtype, np.dtype):
+         # GH#33400 fastpath for dtype object
+         return arr_or_dtype.kind == "M"
+     return _is_dtype_type(arr_or_dtype, classes(np.datetime64))
+ 
+ 
+ def is_datetime64tz_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether an array-like or dtype is of a DatetimeTZDtype dtype.
+ 
+     .. deprecated:: 2.1.0
+         Use isinstance(dtype, pd.DatetimeTZDtype) instead.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_datetime64tz_dtype
+     >>> is_datetime64tz_dtype(object)
+     False
+     >>> is_datetime64tz_dtype([1, 2, 3])
+     False
+     >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3]))  # tz-naive
+     False
+     >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
+     True
+ 
+     >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
+     >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
+     >>> s = pd.Series([], dtype=dtype)
+     >>> is_datetime64tz_dtype(dtype)
+     True
+     >>> is_datetime64tz_dtype(s)
+     True
+     """
+     # GH#52607
+     warnings.warn(
+         "is_datetime64tz_dtype is deprecated and will be removed in a future "
+         "version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+     if isinstance(arr_or_dtype, DatetimeTZDtype):
+         # GH#33400 fastpath for dtype object
+         # GH 34986
+         return True
+ 
+     if arr_or_dtype is None:
+         return False
+     return DatetimeTZDtype.is_dtype(arr_or_dtype)
+ 
+ 
+ def is_timedelta64_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether an array-like or dtype is of the timedelta64 dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array-like or dtype is of the timedelta64 dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.core.dtypes.common import is_timedelta64_dtype
+     >>> is_timedelta64_dtype(object)
+     False
+     >>> is_timedelta64_dtype(np.timedelta64)
+     True
+     >>> is_timedelta64_dtype([1, 2, 3])
+     False
+     >>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
+     True
+     >>> is_timedelta64_dtype('0 days')
+     False
+     """
+     if isinstance(arr_or_dtype, np.dtype):
+         # GH#33400 fastpath for dtype object
+         return arr_or_dtype.kind == "m"
+ 
+     return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))
+ 
+ 
+ def is_period_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether an array-like or dtype is of the Period dtype.
+ 
+     .. deprecated:: 2.2.0
+         Use isinstance(dtype, pd.PeriodDtype) instead.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array-like or dtype is of the Period dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.core.dtypes.common import is_period_dtype
+     >>> is_period_dtype(object)
+     False
+     >>> is_period_dtype(pd.PeriodDtype(freq="D"))
+     True
+     >>> is_period_dtype([1, 2, 3])
+     False
+     >>> is_period_dtype(pd.Period("2017-01-01"))
+     False
+     >>> is_period_dtype(pd.PeriodIndex([], freq="Y"))
+     True
+     """
+     warnings.warn(
+         "is_period_dtype is deprecated and will be removed in a future version. "
+         "Use `isinstance(dtype, pd.PeriodDtype)` instead",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+     if isinstance(arr_or_dtype, ExtensionDtype):
+         # GH#33400 fastpath for dtype object
+         return arr_or_dtype.type is Period
+ 
+     if arr_or_dtype is None:
+         return False
+     return PeriodDtype.is_dtype(arr_or_dtype)
+ 
+ 
+ def is_interval_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether an array-like or dtype is of the Interval dtype.
+ 
+     .. deprecated:: 2.2.0
+         Use isinstance(dtype, pd.IntervalDtype) instead.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array-like or dtype is of the Interval dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.core.dtypes.common import is_interval_dtype
+     >>> is_interval_dtype(object)
+     False
+     >>> is_interval_dtype(pd.IntervalDtype())
+     True
+     >>> is_interval_dtype([1, 2, 3])
+     False
+     >>>
+     >>> interval = pd.Interval(1, 2, closed="right")
+     >>> is_interval_dtype(interval)
+     False
+     >>> is_interval_dtype(pd.IntervalIndex([interval]))
+     True
+     """
+     # GH#52607
+     warnings.warn(
+         "is_interval_dtype is deprecated and will be removed in a future version. "
+         "Use `isinstance(dtype, pd.IntervalDtype)` instead",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+     if isinstance(arr_or_dtype, ExtensionDtype):
+         # GH#33400 fastpath for dtype object
+         return arr_or_dtype.type is Interval
+ 
+     if arr_or_dtype is None:
+         return False
+     return IntervalDtype.is_dtype(arr_or_dtype)
+ 
+ 
+ def is_categorical_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether an array-like or dtype is of the Categorical dtype.
+ 
+     .. deprecated:: 2.2.0
+         Use isinstance(dtype, pd.CategoricalDtype) instead.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array-like or dtype is of the Categorical dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_categorical_dtype
+     >>> from pandas import CategoricalDtype
+     >>> is_categorical_dtype(object)
+     False
+     >>> is_categorical_dtype(CategoricalDtype())
+     True
+     >>> is_categorical_dtype([1, 2, 3])
+     False
+     >>> is_categorical_dtype(pd.Categorical([1, 2, 3]))
+     True
+     >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
+     True
+     """
+     # GH#52527
+     warnings.warn(
+         "is_categorical_dtype is deprecated and will be removed in a future "
+         "version. Use isinstance(dtype, pd.CategoricalDtype) instead",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+     if isinstance(arr_or_dtype, ExtensionDtype):
+         # GH#33400 fastpath for dtype object
+         return arr_or_dtype.name == "category"
+ 
+     if arr_or_dtype is None:
+         return False
+     return CategoricalDtype.is_dtype(arr_or_dtype)
+ 
+ 
+ def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:
+     """
+     Faster alternative to is_string_dtype, assumes we have a np.dtype object.
+     """
+     return dtype == object or dtype.kind in "SU"
+ 
+ 
+ def is_string_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of the string dtype.
+ 
+     If an array is passed with an object dtype, the elements must be
+     inferred as strings.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of the string dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_string_dtype
+     >>> is_string_dtype(str)
+     True
+     >>> is_string_dtype(object)
+     True
+     >>> is_string_dtype(int)
+     False
+     >>> is_string_dtype(np.array(['a', 'b']))
+     True
+     >>> is_string_dtype(pd.Series([1, 2]))
+     False
+     >>> is_string_dtype(pd.Series([1, 2], dtype=object))
+     False
+     """
+     if hasattr(arr_or_dtype, "dtype") and _get_dtype(arr_or_dtype).kind == "O":
+         return is_all_strings(arr_or_dtype)
+ 
+     def condition(dtype) -> bool:
+         if is_string_or_object_np_dtype(dtype):
+             return True
+         try:
+             return dtype == "string"
+         except TypeError:
+             return False
+ 
+     return _is_dtype(arr_or_dtype, condition)
+ 
+ 
+ def is_dtype_equal(source, target) -> bool:
+     """
+     Check if two dtypes are equal.
+ 
+     Parameters
+     ----------
+     source : The first dtype to compare
+     target : The second dtype to compare
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the two dtypes are equal.
+ 
+     Examples
+     --------
+     >>> is_dtype_equal(int, float)
+     False
+     >>> is_dtype_equal("int", int)
+     True
+     >>> is_dtype_equal(object, "category")
+     False
+     >>> is_dtype_equal(CategoricalDtype(), "category")
+     True
+     >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64")
+     False
+     """
+     if isinstance(target, str):
+         if not isinstance(source, str):
+             # GH#38516 ensure we get the same behavior from
+             # is_dtype_equal(CDT, "category") and CDT == "category"
+             try:
+                 src = _get_dtype(source)
+                 if isinstance(src, ExtensionDtype):
+                     return src == target
+             except (TypeError, AttributeError, ImportError):
+                 return False
+     elif isinstance(source, str):
+         return is_dtype_equal(target, source)
+ 
+     try:
+         source = _get_dtype(source)
+         target = _get_dtype(target)
+         return source == target
+     except (TypeError, AttributeError, ImportError):
+         # invalid comparison
+         # object == category will hit this
+         return False
+ 
+ 
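Because `is_dtype_equal` resolves string aliases through `_get_dtype` before comparing, it agrees with `==` on dtype objects while tolerating invalid operands. A short usage sketch against this internal module, matching the docstring examples above:

    from pandas import CategoricalDtype
    from pandas.core.dtypes.common import is_dtype_equal

    print(is_dtype_equal("int64", "int64"))                # True
    print(is_dtype_equal(CategoricalDtype(), "category"))  # True
    # Invalid comparisons return False rather than raising:
    print(is_dtype_equal(object, "category"))              # False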
+ def is_integer_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of an integer dtype.
+ 
+     Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
+ 
+     The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
+     as integer by this function.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of an integer dtype and
+         not an instance of timedelta64.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_integer_dtype
+     >>> is_integer_dtype(str)
+     False
+     >>> is_integer_dtype(int)
+     True
+     >>> is_integer_dtype(float)
+     False
+     >>> is_integer_dtype(np.uint64)
+     True
+     >>> is_integer_dtype('int8')
+     True
+     >>> is_integer_dtype('Int8')
+     True
+     >>> is_integer_dtype(pd.Int8Dtype)
+     True
+     >>> is_integer_dtype(np.datetime64)
+     False
+     >>> is_integer_dtype(np.timedelta64)
+     False
+     >>> is_integer_dtype(np.array(['a', 'b']))
+     False
+     >>> is_integer_dtype(pd.Series([1, 2]))
+     True
+     >>> is_integer_dtype(np.array([], dtype=np.timedelta64))
+     False
+     >>> is_integer_dtype(pd.Index([1, 2.]))  # float
+     False
+     """
+     return _is_dtype_type(
+         arr_or_dtype, _classes_and_not_datetimelike(np.integer)
+     ) or _is_dtype(
+         arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu"
+     )
+ 
+ 
+ def is_signed_integer_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of a signed integer dtype.
+ 
+     Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
+ 
+     The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
+     as integer by this function.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of a signed integer dtype
+         and not an instance of timedelta64.
+ 
+     Examples
+     --------
+     >>> from pandas.core.dtypes.common import is_signed_integer_dtype
+     >>> is_signed_integer_dtype(str)
+     False
+     >>> is_signed_integer_dtype(int)
+     True
+     >>> is_signed_integer_dtype(float)
+     False
+     >>> is_signed_integer_dtype(np.uint64)  # unsigned
+     False
+     >>> is_signed_integer_dtype('int8')
+     True
+     >>> is_signed_integer_dtype('Int8')
+     True
+     >>> is_signed_integer_dtype(pd.Int8Dtype)
+     True
+     >>> is_signed_integer_dtype(np.datetime64)
+     False
+     >>> is_signed_integer_dtype(np.timedelta64)
+     False
+     >>> is_signed_integer_dtype(np.array(['a', 'b']))
+     False
+     >>> is_signed_integer_dtype(pd.Series([1, 2]))
+     True
+     >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
+     False
+     >>> is_signed_integer_dtype(pd.Index([1, 2.]))  # float
+     False
+     >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32))  # unsigned
+     False
+     """
+     return _is_dtype_type(
+         arr_or_dtype, _classes_and_not_datetimelike(np.signedinteger)
+     ) or _is_dtype(
+         arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "i"
+     )
+ 
+ 
+ def is_unsigned_integer_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of an unsigned integer dtype.
+ 
+     The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also
+     considered as integer by this function.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of an unsigned integer dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_unsigned_integer_dtype
+     >>> is_unsigned_integer_dtype(str)
+     False
+     >>> is_unsigned_integer_dtype(int)  # signed
+     False
+     >>> is_unsigned_integer_dtype(float)
+     False
+     >>> is_unsigned_integer_dtype(np.uint64)
+     True
+     >>> is_unsigned_integer_dtype('uint8')
+     True
+     >>> is_unsigned_integer_dtype('UInt8')
+     True
+     >>> is_unsigned_integer_dtype(pd.UInt8Dtype)
+     True
+     >>> is_unsigned_integer_dtype(np.array(['a', 'b']))
+     False
+     >>> is_unsigned_integer_dtype(pd.Series([1, 2]))  # signed
+     False
+     >>> is_unsigned_integer_dtype(pd.Index([1, 2.]))  # float
+     False
+     >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
+     True
+     """
+     return _is_dtype_type(
+         arr_or_dtype, _classes_and_not_datetimelike(np.unsignedinteger)
+     ) or _is_dtype(
+         arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u"
+     )
+ 
+ 
+ def is_int64_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of the int64 dtype.
+ 
+     .. deprecated:: 2.1.0
+ 
+         is_int64_dtype is deprecated and will be removed in a future
+         version. Use dtype == np.int64 instead.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of the int64 dtype.
+ 
+     Notes
+     -----
+     Depending on system architecture, the return value of `is_int64_dtype(
+     int)` will be True if the OS uses 64-bit integers and False if the OS
+     uses 32-bit integers.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_int64_dtype
+     >>> is_int64_dtype(str)  # doctest: +SKIP
+     False
+     >>> is_int64_dtype(np.int32)  # doctest: +SKIP
+     False
+     >>> is_int64_dtype(np.int64)  # doctest: +SKIP
+     True
+     >>> is_int64_dtype('int8')  # doctest: +SKIP
+     False
+     >>> is_int64_dtype('Int8')  # doctest: +SKIP
+     False
+     >>> is_int64_dtype(pd.Int64Dtype)  # doctest: +SKIP
+     True
+     >>> is_int64_dtype(float)  # doctest: +SKIP
+     False
+     >>> is_int64_dtype(np.uint64)  # unsigned  # doctest: +SKIP
+     False
+     >>> is_int64_dtype(np.array(['a', 'b']))  # doctest: +SKIP
+     False
+     >>> is_int64_dtype(np.array([1, 2], dtype=np.int64))  # doctest: +SKIP
+     True
+     >>> is_int64_dtype(pd.Index([1, 2.]))  # float  # doctest: +SKIP
+     False
+     >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32))  # unsigned  # doctest: +SKIP
+     False
+     """
+     # GH#52564
+     warnings.warn(
+         "is_int64_dtype is deprecated and will be removed in a future "
+         "version. Use dtype == np.int64 instead.",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+     return _is_dtype_type(arr_or_dtype, classes(np.int64))
+ 
+ 
+ def is_datetime64_any_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of the datetime64 dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     bool
+         Whether or not the array or dtype is of the datetime64 dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_datetime64_any_dtype
+     >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
+     >>> is_datetime64_any_dtype(str)
+     False
+     >>> is_datetime64_any_dtype(int)
+     False
+     >>> is_datetime64_any_dtype(np.datetime64)  # can be tz-naive
+     True
+     >>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
+     True
+     >>> is_datetime64_any_dtype(np.array(['a', 'b']))
+     False
+     >>> is_datetime64_any_dtype(np.array([1, 2]))
+     False
+     >>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]"))
+     True
+     >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
+     True
+     """
+     if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)):
+         # GH#33400 fastpath for dtype object
+         return arr_or_dtype.kind == "M"
+ 
+     if arr_or_dtype is None:
+         return False
+ 
+     try:
+         tipo = _get_dtype(arr_or_dtype)
+     except TypeError:
+         return False
+     return lib.is_np_dtype(tipo, "M") or isinstance(tipo, DatetimeTZDtype)
+ 
+ 
+ def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of the datetime64[ns] dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     bool
+         Whether or not the array or dtype is of the datetime64[ns] dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_datetime64_ns_dtype
+     >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype
+     >>> is_datetime64_ns_dtype(str)
+     False
+     >>> is_datetime64_ns_dtype(int)
+     False
+     >>> is_datetime64_ns_dtype(np.datetime64)  # no unit
+     False
+     >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
+     True
+     >>> is_datetime64_ns_dtype(np.array(['a', 'b']))
+     False
+     >>> is_datetime64_ns_dtype(np.array([1, 2]))
+     False
+     >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64"))  # no unit
+     False
+     >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]"))  # wrong unit
+     False
+     >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
+     True
+     """
+     if arr_or_dtype is None:
+         return False
+     try:
+         tipo = _get_dtype(arr_or_dtype)
+     except TypeError:
+         return False
+     return tipo == DT64NS_DTYPE or (
+         isinstance(tipo, DatetimeTZDtype) and tipo.unit == "ns"
+     )
+ 
+ 
+ def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of the timedelta64[ns] dtype.
+ 
+     This is a very specific dtype, so generic ones like `np.timedelta64`
+     will return False if passed into this function.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of the timedelta64[ns] dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.core.dtypes.common import is_timedelta64_ns_dtype
+     >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
+     True
+     >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]'))  # Wrong frequency
+     False
+     >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
+     True
+     >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
+     False
+     """
+     return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE)
+ 
+ 
+ # This exists to silence numpy deprecation warnings, see GH#29553
+ def is_numeric_v_string_like(a: ArrayLike, b) -> bool:
+     """
+     Check if we are comparing a string-like object to a numeric ndarray.
+     NumPy doesn't like to compare such objects, especially numeric arrays
+     and scalar string-likes.
+ 
+     Parameters
+     ----------
+     a : array-like, scalar
+         The first object to check.
+     b : array-like, scalar
+         The second object to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether we are comparing a string-like object to a numeric array.
+ 
+     Examples
+     --------
+     >>> is_numeric_v_string_like(np.array([1]), "foo")
+     True
+     >>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
+     True
+     >>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
+     True
+     >>> is_numeric_v_string_like(np.array([1]), np.array([2]))
+     False
+     >>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
+     False
+     """
+     is_a_array = isinstance(a, np.ndarray)
+     is_b_array = isinstance(b, np.ndarray)
+ 
+     is_a_numeric_array = is_a_array and a.dtype.kind in ("u", "i", "f", "c", "b")
+     is_b_numeric_array = is_b_array and b.dtype.kind in ("u", "i", "f", "c", "b")
+     is_a_string_array = is_a_array and a.dtype.kind in ("S", "U")
+     is_b_string_array = is_b_array and b.dtype.kind in ("S", "U")
+ 
+     is_b_scalar_string_like = not is_b_array and isinstance(b, str)
+ 
+     return (
+         (is_a_numeric_array and is_b_scalar_string_like)
+         or (is_a_numeric_array and is_b_string_array)
+         or (is_b_numeric_array and is_a_string_array)
+     )
+ 
+ 
+ def needs_i8_conversion(dtype: DtypeObj | None) -> bool:
+     """
+     Check whether the dtype should be converted to int64.
+ 
+     Dtype "needs" such a conversion if the dtype is a datetime-like dtype.
+ 
+     Parameters
+     ----------
+     dtype : np.dtype, ExtensionDtype, or None
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the dtype should be converted to int64.
+ 
+     Examples
+     --------
+     >>> needs_i8_conversion(str)
+     False
+     >>> needs_i8_conversion(np.int64)
+     False
+     >>> needs_i8_conversion(np.datetime64)
+     False
+     >>> needs_i8_conversion(np.dtype(np.datetime64))
+     True
+     >>> needs_i8_conversion(np.array(['a', 'b']))
+     False
+     >>> needs_i8_conversion(pd.Series([1, 2]))
+     False
+     >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
+     False
+     >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
+     False
+     >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype)
+     True
+     """
+     if isinstance(dtype, np.dtype):
+         return dtype.kind in "mM"
+     return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))
+ 
+ 
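The "i8 conversion" that `needs_i8_conversion` guards is the fact that datetime64/timedelta64 data is stored as int64 under the hood, so a zero-copy view exposes the raw counts. A numpy-only illustration:

    import numpy as np

    stamps = np.array(["2024-01-01", "2024-01-02"], dtype="datetime64[ns]")
    i8 = stamps.view("i8")   # zero-copy reinterpretation of the same buffer
    print(i8)                # nanoseconds since the Unix epoch
    print(i8.dtype)          # int64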
+ def is_numeric_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of a numeric dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of a numeric dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_numeric_dtype
+     >>> is_numeric_dtype(str)
+     False
+     >>> is_numeric_dtype(int)
+     True
+     >>> is_numeric_dtype(float)
+     True
+     >>> is_numeric_dtype(np.uint64)
+     True
+     >>> is_numeric_dtype(np.datetime64)
+     False
+     >>> is_numeric_dtype(np.timedelta64)
+     False
+     >>> is_numeric_dtype(np.array(['a', 'b']))
+     False
+     >>> is_numeric_dtype(pd.Series([1, 2]))
+     True
+     >>> is_numeric_dtype(pd.Index([1, 2.]))
+     True
+     >>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
+     False
+     """
+     return _is_dtype_type(
+         arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_)
+     ) or _is_dtype(
+         arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric
+     )
+ 
+ 
+ def is_any_real_numeric_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of a real number dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of a real number dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_any_real_numeric_dtype
+     >>> is_any_real_numeric_dtype(int)
+     True
+     >>> is_any_real_numeric_dtype(float)
+     True
+     >>> is_any_real_numeric_dtype(object)
+     False
+     >>> is_any_real_numeric_dtype(str)
+     False
+     >>> is_any_real_numeric_dtype(complex(1, 2))
+     False
+     >>> is_any_real_numeric_dtype(bool)
+     False
+     """
+     return (
+         is_numeric_dtype(arr_or_dtype)
+         and not is_complex_dtype(arr_or_dtype)
+         and not is_bool_dtype(arr_or_dtype)
+     )
+ 
+ 
+ def is_float_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of a float dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of a float dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_float_dtype
+     >>> is_float_dtype(str)
+     False
+     >>> is_float_dtype(int)
+     False
+     >>> is_float_dtype(float)
+     True
+     >>> is_float_dtype(np.array(['a', 'b']))
+     False
+     >>> is_float_dtype(pd.Series([1, 2]))
+     False
+     >>> is_float_dtype(pd.Index([1, 2.]))
+     True
+     """
+     return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(
+         arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f"
+     )
+ 
+ 
+ def is_bool_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of a boolean dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of a boolean dtype.
+ 
+     Notes
+     -----
+     An ExtensionArray is considered boolean when the ``_is_boolean``
+     attribute is set to True.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_bool_dtype
+     >>> is_bool_dtype(str)
+     False
+     >>> is_bool_dtype(int)
+     False
+     >>> is_bool_dtype(bool)
+     True
+     >>> is_bool_dtype(np.bool_)
+     True
+     >>> is_bool_dtype(np.array(['a', 'b']))
+     False
+     >>> is_bool_dtype(pd.Series([1, 2]))
+     False
+     >>> is_bool_dtype(np.array([True, False]))
+     True
+     >>> is_bool_dtype(pd.Categorical([True, False]))
+     True
+     >>> is_bool_dtype(pd.arrays.SparseArray([True, False]))
+     True
+     """
+     if arr_or_dtype is None:
+         return False
+     try:
+         dtype = _get_dtype(arr_or_dtype)
+     except (TypeError, ValueError):
+         return False
+ 
+     if isinstance(dtype, CategoricalDtype):
+         arr_or_dtype = dtype.categories
+         # now we use the special definition for Index
+ 
+     if isinstance(arr_or_dtype, ABCIndex):
+         # Allow Index[object] that is all-bools or Index["boolean"]
+         if arr_or_dtype.inferred_type == "boolean":
+             if not is_bool_dtype(arr_or_dtype.dtype):
+                 # GH#52680
+                 warnings.warn(
+                     "The behavior of is_bool_dtype with an object-dtype Index "
+                     "of bool objects is deprecated. In a future version, "
+                     "this will return False. Cast the Index to a bool dtype instead.",
+                     DeprecationWarning,
+                     stacklevel=2,
+                 )
+             return True
+         return False
+     elif isinstance(dtype, ExtensionDtype):
+         return getattr(dtype, "_is_boolean", False)
+ 
+     return issubclass(dtype.type, np.bool_)
+ 
+ 
+ def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool:
+     """
+     Analogue to is_extension_array_dtype but excluding DatetimeTZDtype.
+     """
+     return isinstance(dtype, ExtensionDtype) and not dtype._supports_2d
+ 
+ 
+ def is_extension_array_dtype(arr_or_dtype) -> bool:
+     """
+     Check if an object is a pandas extension array type.
+ 
+     See the :ref:`User Guide <extending.extension-types>` for more.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : object
+         For array-like input, the ``.dtype`` attribute will
+         be extracted.
+ 
+     Returns
+     -------
+     bool
+         Whether the `arr_or_dtype` is an extension array type.
+ 
+     Notes
+     -----
+     This checks whether an object implements the pandas extension
+     array interface. In pandas, this includes:
+ 
+     * Categorical
+     * Sparse
+     * Interval
+     * Period
+     * DatetimeArray
+     * TimedeltaArray
+ 
+     Third-party libraries may implement arrays or types satisfying
+     this interface as well.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_extension_array_dtype
+     >>> arr = pd.Categorical(['a', 'b'])
+     >>> is_extension_array_dtype(arr)
+     True
+     >>> is_extension_array_dtype(arr.dtype)
+     True
+ 
+     >>> arr = np.array(['a', 'b'])
+     >>> is_extension_array_dtype(arr.dtype)
+     False
+     """
+     dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
+     if isinstance(dtype, ExtensionDtype):
+         return True
+     elif isinstance(dtype, np.dtype):
+         return False
+     else:
+         return registry.find(dtype) is not None
+ 
+ 
+ def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool:
+     """
+     Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype.
+ 
+     Notes
+     -----
+     Checks only for dtype objects, not dtype-castable strings or types.
+     """
+     return isinstance(dtype, ExtensionDtype) or (lib.is_np_dtype(dtype, "mM"))
+ 
+ 
+ def is_complex_dtype(arr_or_dtype) -> bool:
+     """
+     Check whether the provided array or dtype is of a complex dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array or dtype to check.
+ 
+     Returns
+     -------
+     boolean
+         Whether or not the array or dtype is of a complex dtype.
+ 
+     Examples
+     --------
+     >>> from pandas.api.types import is_complex_dtype
+     >>> is_complex_dtype(str)
+     False
+     >>> is_complex_dtype(int)
+     False
+     >>> is_complex_dtype(np.complex128)
+     True
+     >>> is_complex_dtype(np.array(['a', 'b']))
+     False
+     >>> is_complex_dtype(pd.Series([1, 2]))
+     False
+     >>> is_complex_dtype(np.array([1 + 1j, 5]))
+     True
+     """
+     return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))
+ 
+ 
+ def _is_dtype(arr_or_dtype, condition) -> bool:
+     """
+     Return true if the condition is satisfied for the arr_or_dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType
+         The array-like or dtype object whose dtype we want to extract.
+     condition : callable[Union[np.dtype, ExtensionDtype]]
+ 
+     Returns
+     -------
+     bool
+     """
+     if arr_or_dtype is None:
+         return False
+     try:
+         dtype = _get_dtype(arr_or_dtype)
+     except (TypeError, ValueError):
+         return False
+     return condition(dtype)
+ 
+ 
+ def _get_dtype(arr_or_dtype) -> DtypeObj:
+     """
+     Get the dtype instance associated with an array
+     or dtype object.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype object whose dtype we want to extract.
+ 
+     Returns
+     -------
+     obj_dtype : The extracted dtype instance from the
+         passed-in array or dtype object.
+ 
+     Raises
+     ------
+     TypeError : The passed in object is None.
+     """
+     if arr_or_dtype is None:
+         raise TypeError("Cannot deduce dtype from null object")
+ 
+     # fastpath
+     if isinstance(arr_or_dtype, np.dtype):
+         return arr_or_dtype
+     elif isinstance(arr_or_dtype, type):
+         return np.dtype(arr_or_dtype)
+ 
+     # if we have an array-like
+     elif hasattr(arr_or_dtype, "dtype"):
+         arr_or_dtype = arr_or_dtype.dtype
+ 
+     return pandas_dtype(arr_or_dtype)
+ 
+ 
+ def _is_dtype_type(arr_or_dtype, condition) -> bool:
+     """
+     Return true if the condition is satisfied for the arr_or_dtype.
+ 
+     Parameters
+     ----------
+     arr_or_dtype : array-like or dtype
+         The array-like or dtype object whose dtype we want to extract.
+     condition : callable[Union[np.dtype, ExtensionDtypeType]]
+ 
+     Returns
+     -------
+     bool : if the condition is satisfied for the arr_or_dtype
+     """
+     if arr_or_dtype is None:
+         return condition(type(None))
+ 
+     # fastpath
+     if isinstance(arr_or_dtype, np.dtype):
+         return condition(arr_or_dtype.type)
+     elif isinstance(arr_or_dtype, type):
+         if issubclass(arr_or_dtype, ExtensionDtype):
+             arr_or_dtype = arr_or_dtype.type
+         return condition(np.dtype(arr_or_dtype).type)
+ 
+     # if we have an array-like
+     if hasattr(arr_or_dtype, "dtype"):
+         arr_or_dtype = arr_or_dtype.dtype
+ 
+     # we are not possibly a dtype
+     elif is_list_like(arr_or_dtype):
+         return condition(type(None))
+ 
+     try:
+         tipo = pandas_dtype(arr_or_dtype).type
+     except (TypeError, ValueError):
+         if is_scalar(arr_or_dtype):
+             return condition(type(None))
+ 
+         return False
+ 
+     return condition(tipo)
+ 
+ 
+ def infer_dtype_from_object(dtype) -> type:
+     """
+     Get a numpy dtype.type-style object for a dtype object.
+ 
+     This method also includes handling of the datetime64[ns] and
+     datetime64[ns, TZ] objects.
+ 
+     If no dtype can be found, we return ``object``.
+ 
+     Parameters
+     ----------
+     dtype : dtype, type
+         The dtype object whose numpy dtype.type-style
+         object we want to extract.
+ 
+     Returns
+     -------
+     type
+     """
+     if isinstance(dtype, type) and issubclass(dtype, np.generic):
+         # Type object from a dtype
+ 
+         return dtype
+     elif isinstance(dtype, (np.dtype, ExtensionDtype)):
+         # dtype object
+         try:
+             _validate_date_like_dtype(dtype)
+         except TypeError:
+             # Should still pass if we don't have a date-like
+             pass
+         if hasattr(dtype, "numpy_dtype"):
+             # TODO: Implement this properly
+             # https://github.com/pandas-dev/pandas/issues/52576
+             return dtype.numpy_dtype.type
+         return dtype.type
+ 
+     try:
+         dtype = pandas_dtype(dtype)
+     except TypeError:
+         pass
+ 
+     if isinstance(dtype, ExtensionDtype):
+         return dtype.type
+     elif isinstance(dtype, str):
+         # TODO(jreback)
+         # should deprecate these
+         if dtype in ["datetimetz", "datetime64tz"]:
+             return DatetimeTZDtype.type
+         elif dtype in ["period"]:
+             raise NotImplementedError
+ 
+         if dtype in ["datetime", "timedelta"]:
+             dtype += "64"
+         try:
+             return infer_dtype_from_object(getattr(np, dtype))
+         except (AttributeError, TypeError):
+             # Handles cases like _get_dtype(int) i.e.,
+             # Python objects that are valid dtypes
+             # (unlike user-defined types, in general)
+             #
+             # TypeError handles the float16 type code of 'e'
+             # further handle internal types
+             pass
+ 
+     return infer_dtype_from_object(np.dtype(dtype))
+ 
+ 
+ def _validate_date_like_dtype(dtype) -> None:
+     """
+     Check whether the dtype is a date-like dtype. Raises an error if invalid.
+ 
+     Parameters
+     ----------
+     dtype : dtype, type
+         The dtype to check.
+ 
+     Raises
+     ------
+     TypeError : The dtype cannot be cast to a date-like dtype.
+     ValueError : The dtype is an illegal date-like dtype (e.g. the
+         frequency provided is too specific)
+     """
+     try:
+         typ = np.datetime_data(dtype)[0]
+     except ValueError as e:
+         raise TypeError(e) from e
+     if typ not in ["generic", "ns"]:
+         raise ValueError(
+             f"{repr(dtype.name)} is too specific of a frequency, "
+             f"try passing {repr(dtype.type.__name__)}"
+         )
+ 
+ 
+ def validate_all_hashable(*args, error_name: str | None = None) -> None:
+     """
+     Return None if all args are hashable, else raise a TypeError.
+ 
+     Parameters
+     ----------
+     *args
+         Arguments to validate.
+     error_name : str, optional
+         The name to use in the error message.
+ 
+     Raises
+     ------
+     TypeError : If an argument is not hashable
+ 
+     Returns
+     -------
+     None
+     """
+     if not all(is_hashable(arg) for arg in args):
+         if error_name:
+             raise TypeError(f"{error_name} must be a hashable type")
+         raise TypeError("All elements must be hashable")
+ 
+ 
+ def pandas_dtype(dtype) -> DtypeObj:
+     """
+     Convert input into a pandas only dtype object or a numpy dtype object.
+ 
+     Parameters
+     ----------
+     dtype : object to be converted
+ 
+     Returns
+     -------
+     np.dtype or a pandas dtype
+ 
+     Raises
+     ------
+     TypeError if not a dtype
+ 
+     Examples
+     --------
+     >>> pd.api.types.pandas_dtype(int)
+     dtype('int64')
+     """
+     # short-circuit
+     if isinstance(dtype, np.ndarray):
+         return dtype.dtype
+     elif isinstance(dtype, (np.dtype, ExtensionDtype)):
+         return dtype
+ 
+     # registered extension types
+     result = registry.find(dtype)
+     if result is not None:
+         if isinstance(result, type):
+             # GH 31356, GH 54592
+             warnings.warn(
+                 f"Instantiating {result.__name__} without any arguments. "
+                 f"Pass a {result.__name__} instance to silence this warning.",
+                 UserWarning,
+                 stacklevel=find_stack_level(),
+             )
+             result = result()
+         return result
+ 
+     # try a numpy dtype
+     # raise a consistent TypeError if failed
+     try:
+         with warnings.catch_warnings():
+             # GH#51523 - Series.astype(np.integer) doesn't show
+             # numpy deprecation warning of np.integer
+             # Hence enabling DeprecationWarning
+             warnings.simplefilter("always", DeprecationWarning)
+             npdtype = np.dtype(dtype)
+     except SyntaxError as err:
+         # np.dtype uses `eval` which can raise SyntaxError
+         raise TypeError(f"data type '{dtype}' not understood") from err
+ 
+     # Any invalid dtype (such as pd.Timestamp) should raise an error.
+     # np.dtype(invalid_type).kind = 0 for such objects. However, this will
+     # also catch some valid dtypes such as object, np.object_ and 'object'
+     # which we safeguard against by catching them earlier and returning
+     # np.dtype(valid_dtype) before this condition is evaluated.
+     if is_hashable(dtype) and dtype in [
+         object,
+         np.object_,
+         "object",
+         "O",
+         "object_",
+     ]:
+         # check hashability to avoid errors/DeprecationWarning when we get
+         # here and `dtype` is an array
+         return npdtype
+     elif npdtype.kind == "O":
+         raise TypeError(f"dtype '{dtype}' not understood")
+ 
+     return npdtype
+ 
+ 
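`pandas_dtype` is also exposed publicly as `pd.api.types.pandas_dtype`; a short usage sketch (the exact numpy mapping of plain `int` can vary by platform, hence the string alias below):

    import pandas as pd

    print(pd.api.types.pandas_dtype("int64"))     # dtype('int64')
    print(pd.api.types.pandas_dtype("Int64"))     # Int64Dtype(), a pandas extension dtype
    print(pd.api.types.pandas_dtype("category"))  # CategoricalDtype(...)
    try:
        pd.api.types.pandas_dtype(pd.Timestamp)   # not a dtype -> TypeError
    except TypeError as exc:
        print(exc)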
1671
+ def is_all_strings(value: ArrayLike) -> bool:
1672
+ """
1673
+ Check if this is an array of strings that we should try parsing.
1674
+
1675
+ Includes object-dtype ndarray containing all-strings, StringArray,
1676
+ and Categorical with all-string categories.
1677
+ Does not include numpy string dtypes.
1678
+ """
1679
+ dtype = value.dtype
1680
+
1681
+ if isinstance(dtype, np.dtype):
1682
+ if len(value) == 0:
1683
+ return dtype == np.dtype("object")
1684
+ else:
1685
+ return dtype == np.dtype("object") and lib.is_string_array(
1686
+ np.asarray(value), skipna=False
1687
+ )
1688
+ elif isinstance(dtype, CategoricalDtype):
1689
+ return dtype.categories.inferred_type == "string"
1690
+ return dtype == "string"
1691
+
1692
+
1693
+ __all__ = [
1694
+ "classes",
1695
+ "DT64NS_DTYPE",
1696
+ "ensure_float64",
1697
+ "ensure_python_int",
1698
+ "ensure_str",
1699
+ "infer_dtype_from_object",
1700
+ "INT64_DTYPE",
1701
+ "is_1d_only_ea_dtype",
1702
+ "is_all_strings",
1703
+ "is_any_real_numeric_dtype",
1704
+ "is_array_like",
1705
+ "is_bool",
1706
+ "is_bool_dtype",
1707
+ "is_categorical_dtype",
1708
+ "is_complex",
1709
+ "is_complex_dtype",
1710
+ "is_dataclass",
1711
+ "is_datetime64_any_dtype",
1712
+ "is_datetime64_dtype",
1713
+ "is_datetime64_ns_dtype",
1714
+ "is_datetime64tz_dtype",
1715
+ "is_decimal",
1716
+ "is_dict_like",
1717
+ "is_dtype_equal",
1718
+ "is_ea_or_datetimelike_dtype",
1719
+ "is_extension_array_dtype",
1720
+ "is_file_like",
1721
+ "is_float_dtype",
1722
+ "is_int64_dtype",
1723
+ "is_integer_dtype",
1724
+ "is_interval",
1725
+ "is_interval_dtype",
1726
+ "is_iterator",
1727
+ "is_named_tuple",
1728
+ "is_nested_list_like",
1729
+ "is_number",
1730
+ "is_numeric_dtype",
1731
+ "is_object_dtype",
1732
+ "is_period_dtype",
1733
+ "is_re",
1734
+ "is_re_compilable",
1735
+ "is_scipy_sparse",
1736
+ "is_sequence",
1737
+ "is_signed_integer_dtype",
1738
+ "is_sparse",
1739
+ "is_string_dtype",
1740
+ "is_string_or_object_np_dtype",
1741
+ "is_timedelta64_dtype",
1742
+ "is_timedelta64_ns_dtype",
1743
+ "is_unsigned_integer_dtype",
1744
+ "needs_i8_conversion",
1745
+ "pandas_dtype",
1746
+ "TD64NS_DTYPE",
1747
+ "validate_all_hashable",
1748
+ ]
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/concat.py ADDED
@@ -0,0 +1,348 @@
1
+ """
2
+ Utility functions related to concat.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ cast,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas.util._exceptions import find_stack_level
16
+
17
+ from pandas.core.dtypes.astype import astype_array
18
+ from pandas.core.dtypes.cast import (
19
+ common_dtype_categorical_compat,
20
+ find_common_type,
21
+ np_find_common_type,
22
+ )
23
+ from pandas.core.dtypes.dtypes import CategoricalDtype
24
+ from pandas.core.dtypes.generic import (
25
+ ABCCategoricalIndex,
26
+ ABCSeries,
27
+ )
28
+
29
+ if TYPE_CHECKING:
30
+ from collections.abc import Sequence
31
+
32
+ from pandas._typing import (
33
+ ArrayLike,
34
+ AxisInt,
35
+ DtypeObj,
36
+ )
37
+
38
+ from pandas.core.arrays import (
39
+ Categorical,
40
+ ExtensionArray,
41
+ )
42
+
43
+
44
+ def _is_nonempty(x, axis) -> bool:
45
+ # filter empty arrays
46
+ # 1-d dtypes always are included here
47
+ if x.ndim <= axis:
48
+ return True
49
+ return x.shape[axis] > 0
50
+
51
+
52
+ def concat_compat(
53
+ to_concat: Sequence[ArrayLike], axis: AxisInt = 0, ea_compat_axis: bool = False
54
+ ) -> ArrayLike:
55
+ """
56
+ provide concatenation of an array of arrays each of which is a single
57
+ 'normalized' dtypes (in that for example, if it's object, then it is a
58
+ non-datetimelike and provide a combined dtype for the resulting array that
59
+ preserves the overall dtype if possible)
60
+
61
+ Parameters
62
+ ----------
63
+ to_concat : sequence of arrays
64
+ axis : axis to provide concatenation
65
+ ea_compat_axis : bool, default False
66
+ For ExtensionArray compat, behave as if axis == 1 when determining
67
+ whether to drop empty arrays.
68
+
69
+ Returns
70
+ -------
71
+ a single array, preserving the combined dtypes
72
+ """
73
+ if len(to_concat) and lib.dtypes_all_equal([obj.dtype for obj in to_concat]):
74
+ # fastpath!
75
+ obj = to_concat[0]
76
+ if isinstance(obj, np.ndarray):
77
+ to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)
78
+ return np.concatenate(to_concat_arrs, axis=axis)
79
+
80
+ to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)
81
+ if ea_compat_axis:
82
+ # We have 1D objects, that don't support axis keyword
83
+ return obj._concat_same_type(to_concat_eas)
84
+ elif axis == 0:
85
+ return obj._concat_same_type(to_concat_eas)
86
+ else:
87
+ # e.g. DatetimeArray
88
+ # NB: We are assuming here that ensure_wrapped_if_arraylike has
89
+ # been called where relevant.
90
+ return obj._concat_same_type(
91
+ # error: Unexpected keyword argument "axis" for "_concat_same_type"
92
+ # of "ExtensionArray"
93
+ to_concat_eas,
94
+ axis=axis, # type: ignore[call-arg]
95
+ )
96
+
97
+ # If all arrays are empty, there's nothing to convert, just short-cut to
98
+ # the concatenation, #3121.
99
+ #
100
+ # Creating an empty array directly is tempting, but the winnings would be
101
+ # marginal given that it would still require shape & dtype calculation and
102
+ # np.concatenate which has them both implemented is compiled.
103
+ orig = to_concat
104
+ non_empties = [x for x in to_concat if _is_nonempty(x, axis)]
105
+ if non_empties and axis == 0 and not ea_compat_axis:
106
+ # ea_compat_axis see GH#39574
107
+ to_concat = non_empties
108
+
109
+ any_ea, kinds, target_dtype = _get_result_dtype(to_concat, non_empties)
110
+
111
+ if len(to_concat) < len(orig):
112
+ _, _, alt_dtype = _get_result_dtype(orig, non_empties)
113
+ if alt_dtype != target_dtype:
114
+ # GH#39122
115
+ warnings.warn(
116
+ "The behavior of array concatenation with empty entries is "
117
+ "deprecated. In a future version, this will no longer exclude "
118
+ "empty items when determining the result dtype. "
119
+ "To retain the old behavior, exclude the empty entries before "
120
+ "the concat operation.",
121
+ FutureWarning,
122
+ stacklevel=find_stack_level(),
123
+ )
124
+
125
+ if target_dtype is not None:
126
+ to_concat = [astype_array(arr, target_dtype, copy=False) for arr in to_concat]
127
+
128
+ if not isinstance(to_concat[0], np.ndarray):
129
+ # i.e. isinstance(to_concat[0], ExtensionArray)
130
+ to_concat_eas = cast("Sequence[ExtensionArray]", to_concat)
131
+ cls = type(to_concat[0])
132
+ # GH#53640: e.g. for datetime array, axis=1 but 0 is default
133
+ # However, class method `_concat_same_type()` for some classes
134
+ # may not support the `axis` keyword
135
+ if ea_compat_axis or axis == 0:
136
+ return cls._concat_same_type(to_concat_eas)
137
+ else:
138
+ return cls._concat_same_type(
139
+ to_concat_eas,
140
+ axis=axis, # type: ignore[call-arg]
141
+ )
142
+ else:
143
+ to_concat_arrs = cast("Sequence[np.ndarray]", to_concat)
144
+ result = np.concatenate(to_concat_arrs, axis=axis)
145
+
146
+ if not any_ea and "b" in kinds and result.dtype.kind in "iuf":
147
+ # GH#39817 cast to object instead of casting bools to numeric
148
+ result = result.astype(object, copy=False)
149
+ return result
150
+
151
+
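
A minimal sketch of the two dtype rules in `concat_compat` above, exercised through the public `pd.concat` (which routes through this path); the exact warning text and the version in which the deprecation is enforced may vary:

    import warnings
    import pandas as pd

    # GH#39817: mixing plain bool with numeric keeps object dtype rather
    # than letting numpy upcast the bools to integers.
    out = pd.concat([pd.Series([True, False]), pd.Series([1, 2])])
    print(out.dtype)  # object

    # GH#39122: empty entries are currently excluded when computing the
    # result dtype, and a FutureWarning announces the upcoming change.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        out = pd.concat([pd.Series([1.0, 2.0]), pd.Series([], dtype=object)])
    print(out.dtype)  # float64 under the current (deprecated) behavior
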
152
+ def _get_result_dtype(
153
+ to_concat: Sequence[ArrayLike], non_empties: Sequence[ArrayLike]
154
+ ) -> tuple[bool, set[str], DtypeObj | None]:
155
+ target_dtype = None
156
+
157
+ dtypes = {obj.dtype for obj in to_concat}
158
+ kinds = {obj.dtype.kind for obj in to_concat}
159
+
160
+ any_ea = any(not isinstance(x, np.ndarray) for x in to_concat)
161
+ if any_ea:
162
+ # i.e. any ExtensionArrays
163
+
164
+ # we ignore axis here, as internally concatting with EAs is always
165
+ # for axis=0
166
+ if len(dtypes) != 1:
167
+ target_dtype = find_common_type([x.dtype for x in to_concat])
168
+ target_dtype = common_dtype_categorical_compat(to_concat, target_dtype)
169
+
170
+ elif not len(non_empties):
171
+ # we have all empties, but may need to coerce the result dtype to
172
+ # object if we have non-numeric type operands (numpy would otherwise
173
+ # cast this to float)
174
+ if len(kinds) != 1:
175
+ if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}):
176
+ # let numpy coerce
177
+ pass
178
+ else:
179
+ # coerce to object
180
+ target_dtype = np.dtype(object)
181
+ kinds = {"o"}
182
+ else:
183
+ # error: Argument 1 to "np_find_common_type" has incompatible type
184
+ # "*Set[Union[ExtensionDtype, Any]]"; expected "dtype[Any]"
185
+ target_dtype = np_find_common_type(*dtypes) # type: ignore[arg-type]
186
+
187
+ return any_ea, kinds, target_dtype
188
+
189
+
190
+ def union_categoricals(
191
+ to_union, sort_categories: bool = False, ignore_order: bool = False
192
+ ) -> Categorical:
193
+ """
194
+ Combine list-like of Categorical-like, unioning categories.
195
+
196
+ All categories must have the same dtype.
197
+
198
+ Parameters
199
+ ----------
200
+ to_union : list-like
201
+ Categorical, CategoricalIndex, or Series with dtype='category'.
202
+ sort_categories : bool, default False
203
+ If true, resulting categories will be lexsorted, otherwise
204
+ they will be ordered as they appear in the data.
205
+ ignore_order : bool, default False
206
+ If true, the ordered attribute of the Categoricals will be ignored.
207
+ Results in an unordered categorical.
208
+
209
+ Returns
210
+ -------
211
+ Categorical
212
+
213
+ Raises
214
+ ------
215
+ TypeError
216
+ - not all inputs have the same dtype
217
+ - not all inputs have the same ordered property
218
+ - all inputs are ordered and their categories are not identical
219
+ - sort_categories=True and Categoricals are ordered
220
+ ValueError
221
+ Empty list of categoricals passed
222
+
223
+ Notes
224
+ -----
225
+ To learn more about categories, see `link
226
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#unioning>`__
227
+
228
+ Examples
229
+ --------
230
+ If you want to combine categoricals that do not necessarily have
231
+ the same categories, `union_categoricals` will combine a list-like
232
+ of categoricals. The new categories will be the union of the
233
+ categories being combined.
234
+
235
+ >>> a = pd.Categorical(["b", "c"])
236
+ >>> b = pd.Categorical(["a", "b"])
237
+ >>> pd.api.types.union_categoricals([a, b])
238
+ ['b', 'c', 'a', 'b']
239
+ Categories (3, object): ['b', 'c', 'a']
240
+
241
+ By default, the resulting categories will be ordered as they appear
242
+ in the `categories` of the data. If you want the categories to be
243
+ lexsorted, use `sort_categories=True` argument.
244
+
245
+ >>> pd.api.types.union_categoricals([a, b], sort_categories=True)
246
+ ['b', 'c', 'a', 'b']
247
+ Categories (3, object): ['a', 'b', 'c']
248
+
249
+ `union_categoricals` also works with the case of combining two
250
+ categoricals of the same categories and order information (e.g. what
251
+ you could also `append` for).
252
+
253
+ >>> a = pd.Categorical(["a", "b"], ordered=True)
254
+ >>> b = pd.Categorical(["a", "b", "a"], ordered=True)
255
+ >>> pd.api.types.union_categoricals([a, b])
256
+ ['a', 'b', 'a', 'b', 'a']
257
+ Categories (2, object): ['a' < 'b']
258
+
259
+ Raises `TypeError` because the categories are ordered and not identical.
260
+
261
+ >>> a = pd.Categorical(["a", "b"], ordered=True)
262
+ >>> b = pd.Categorical(["a", "b", "c"], ordered=True)
263
+ >>> pd.api.types.union_categoricals([a, b])
264
+ Traceback (most recent call last):
265
+ ...
266
+ TypeError: to union ordered Categoricals, all categories must be the same
267
+
268
+ Ordered categoricals with different categories or orderings can be
269
+ combined by using the `ignore_order=True` argument.
270
+
271
+ >>> a = pd.Categorical(["a", "b", "c"], ordered=True)
272
+ >>> b = pd.Categorical(["c", "b", "a"], ordered=True)
273
+ >>> pd.api.types.union_categoricals([a, b], ignore_order=True)
274
+ ['a', 'b', 'c', 'c', 'b', 'a']
275
+ Categories (3, object): ['a', 'b', 'c']
276
+
277
+ `union_categoricals` also works with a `CategoricalIndex`, or `Series`
278
+ containing categorical data, but note that the resulting array will
279
+ always be a plain `Categorical`
280
+
281
+ >>> a = pd.Series(["b", "c"], dtype='category')
282
+ >>> b = pd.Series(["a", "b"], dtype='category')
283
+ >>> pd.api.types.union_categoricals([a, b])
284
+ ['b', 'c', 'a', 'b']
285
+ Categories (3, object): ['b', 'c', 'a']
286
+ """
287
+ from pandas import Categorical
288
+ from pandas.core.arrays.categorical import recode_for_categories
289
+
290
+ if len(to_union) == 0:
291
+ raise ValueError("No Categoricals to union")
292
+
293
+ def _maybe_unwrap(x):
294
+ if isinstance(x, (ABCCategoricalIndex, ABCSeries)):
295
+ return x._values
296
+ elif isinstance(x, Categorical):
297
+ return x
298
+ else:
299
+ raise TypeError("all components to combine must be Categorical")
300
+
301
+ to_union = [_maybe_unwrap(x) for x in to_union]
302
+ first = to_union[0]
303
+
304
+ if not lib.dtypes_all_equal([obj.categories.dtype for obj in to_union]):
305
+ raise TypeError("dtype of categories must be the same")
306
+
307
+ ordered = False
308
+ if all(first._categories_match_up_to_permutation(other) for other in to_union[1:]):
309
+ # identical categories - fastpath
310
+ categories = first.categories
311
+ ordered = first.ordered
312
+
313
+ all_codes = [first._encode_with_my_categories(x)._codes for x in to_union]
314
+ new_codes = np.concatenate(all_codes)
315
+
316
+ if sort_categories and not ignore_order and ordered:
317
+ raise TypeError("Cannot use sort_categories=True with ordered Categoricals")
318
+
319
+ if sort_categories and not categories.is_monotonic_increasing:
320
+ categories = categories.sort_values()
321
+ indexer = categories.get_indexer(first.categories)
322
+
323
+ from pandas.core.algorithms import take_nd
324
+
325
+ new_codes = take_nd(indexer, new_codes, fill_value=-1)
326
+ elif ignore_order or all(not c.ordered for c in to_union):
327
+ # different categories - union and recode
328
+ cats = first.categories.append([c.categories for c in to_union[1:]])
329
+ categories = cats.unique()
330
+ if sort_categories:
331
+ categories = categories.sort_values()
332
+
333
+ new_codes = [
334
+ recode_for_categories(c.codes, c.categories, categories) for c in to_union
335
+ ]
336
+ new_codes = np.concatenate(new_codes)
337
+ else:
338
+ # ordered - to show a proper error message
339
+ if all(c.ordered for c in to_union):
340
+ msg = "to union ordered Categoricals, all categories must be the same"
341
+ raise TypeError(msg)
342
+ raise TypeError("Categorical.ordered must be the same")
343
+
344
+ if ignore_order:
345
+ ordered = False
346
+
347
+ dtype = CategoricalDtype(categories=categories, ordered=ordered)
348
+ return Categorical._simple_new(new_codes, dtype=dtype)
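
To make the recoding in the different-categories branch above concrete, a small sketch using only public attributes (`recode_for_categories` itself is internal):

    import pandas as pd

    a = pd.Categorical(["b", "c"])  # categories ['b', 'c'], codes [0, 1]
    b = pd.Categorical(["a", "b"])  # categories ['a', 'b'], codes [0, 1]

    out = pd.api.types.union_categoricals([a, b])
    print(list(out.categories))  # ['b', 'c', 'a'] - union, in order of appearance
    print(out.codes.tolist())    # [0, 1, 2, 0] - b's codes recoded to the union
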
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py ADDED
@@ -0,0 +1,2348 @@
1
+ """
2
+ Define extension dtypes.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from datetime import (
7
+ date,
8
+ datetime,
9
+ time,
10
+ timedelta,
11
+ )
12
+ from decimal import Decimal
13
+ import re
14
+ from typing import (
15
+ TYPE_CHECKING,
16
+ Any,
17
+ cast,
18
+ )
19
+ import warnings
20
+
21
+ import numpy as np
22
+ import pytz
23
+
24
+ from pandas._libs import (
25
+ lib,
26
+ missing as libmissing,
27
+ )
28
+ from pandas._libs.interval import Interval
29
+ from pandas._libs.properties import cache_readonly
30
+ from pandas._libs.tslibs import (
31
+ BaseOffset,
32
+ NaT,
33
+ NaTType,
34
+ Period,
35
+ Timedelta,
36
+ Timestamp,
37
+ timezones,
38
+ to_offset,
39
+ tz_compare,
40
+ )
41
+ from pandas._libs.tslibs.dtypes import (
42
+ PeriodDtypeBase,
43
+ abbrev_to_npy_unit,
44
+ )
45
+ from pandas._libs.tslibs.offsets import BDay
46
+ from pandas.compat import pa_version_under10p1
47
+ from pandas.errors import PerformanceWarning
48
+ from pandas.util._exceptions import find_stack_level
49
+
50
+ from pandas.core.dtypes.base import (
51
+ ExtensionDtype,
52
+ StorageExtensionDtype,
53
+ register_extension_dtype,
54
+ )
55
+ from pandas.core.dtypes.generic import (
56
+ ABCCategoricalIndex,
57
+ ABCIndex,
58
+ ABCRangeIndex,
59
+ )
60
+ from pandas.core.dtypes.inference import (
61
+ is_bool,
62
+ is_list_like,
63
+ )
64
+
65
+ from pandas.util import capitalize_first_letter
66
+
67
+ if not pa_version_under10p1:
68
+ import pyarrow as pa
69
+
70
+ if TYPE_CHECKING:
71
+ from collections.abc import MutableMapping
72
+ from datetime import tzinfo
73
+
74
+ import pyarrow as pa # noqa: TCH004
75
+
76
+ from pandas._typing import (
77
+ Dtype,
78
+ DtypeObj,
79
+ IntervalClosedType,
80
+ Ordered,
81
+ Self,
82
+ npt,
83
+ type_t,
84
+ )
85
+
86
+ from pandas import (
87
+ Categorical,
88
+ CategoricalIndex,
89
+ DatetimeIndex,
90
+ Index,
91
+ IntervalIndex,
92
+ PeriodIndex,
93
+ )
94
+ from pandas.core.arrays import (
95
+ BaseMaskedArray,
96
+ DatetimeArray,
97
+ IntervalArray,
98
+ NumpyExtensionArray,
99
+ PeriodArray,
100
+ SparseArray,
101
+ )
102
+ from pandas.core.arrays.arrow import ArrowExtensionArray
103
+
104
+ str_type = str
105
+
106
+
107
+ class PandasExtensionDtype(ExtensionDtype):
108
+ """
109
+ A np.dtype duck-typed class, suitable for holding a custom dtype.
110
+
111
+ THIS IS NOT A REAL NUMPY DTYPE
112
+ """
113
+
114
+ type: Any
115
+ kind: Any
116
+ # The Any type annotations above are here only because mypy seems to have a
117
+ # problem dealing with multiple inheritance from PandasExtensionDtype
118
+ # and ExtensionDtype's @properties in the subclasses below. The kind and
119
+ # type variables in those subclasses are explicitly typed below.
120
+ subdtype = None
121
+ str: str_type
122
+ num = 100
123
+ shape: tuple[int, ...] = ()
124
+ itemsize = 8
125
+ base: DtypeObj | None = None
126
+ isbuiltin = 0
127
+ isnative = 0
128
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
129
+
130
+ def __repr__(self) -> str_type:
131
+ """
132
+ Return a string representation for a particular object.
133
+ """
134
+ return str(self)
135
+
136
+ def __hash__(self) -> int:
137
+ raise NotImplementedError("sub-classes should implement an __hash__ method")
138
+
139
+ def __getstate__(self) -> dict[str_type, Any]:
140
+ # pickle support; we don't want to pickle the cache
141
+ return {k: getattr(self, k, None) for k in self._metadata}
142
+
143
+ @classmethod
144
+ def reset_cache(cls) -> None:
145
+ """clear the cache"""
146
+ cls._cache_dtypes = {}
147
+
148
+
149
+ class CategoricalDtypeType(type):
150
+ """
151
+ The type of CategoricalDtype; this metaclass determines subclass ability.
152
+ """
153
+
154
+
155
+ @register_extension_dtype
156
+ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
157
+ """
158
+ Type for categorical data with the categories and orderedness.
159
+
160
+ Parameters
161
+ ----------
162
+ categories : sequence, optional
163
+ Must be unique, and must not contain any nulls.
164
+ The categories are stored in an Index,
165
+ and if an index is provided the dtype of that index will be used.
166
+ ordered : bool or None, default False
167
+ Whether or not this categorical is treated as an ordered categorical.
168
+ None can be used to maintain the ordered value of existing categoricals when
169
+ used in operations that combine categoricals, e.g. astype, and will resolve to
170
+ False if there is no existing ordered to maintain.
171
+
172
+ Attributes
173
+ ----------
174
+ categories
175
+ ordered
176
+
177
+ Methods
178
+ -------
179
+ None
180
+
181
+ See Also
182
+ --------
183
+ Categorical : Represent a categorical variable in classic R / S-plus fashion.
184
+
185
+ Notes
186
+ -----
187
+ This class is useful for specifying the type of a ``Categorical``
188
+ independent of the values. See :ref:`categorical.categoricaldtype`
189
+ for more.
190
+
191
+ Examples
192
+ --------
193
+ >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
194
+ >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
195
+ 0 a
196
+ 1 b
197
+ 2 a
198
+ 3 NaN
199
+ dtype: category
200
+ Categories (2, object): ['b' < 'a']
201
+
202
+ An empty CategoricalDtype with a specific dtype can be created
203
+ by providing an empty index, as follows:
204
+
205
+ >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype
206
+ dtype('<M8[ns]')
207
+ """
208
+
209
+ # TODO: Document public vs. private API
210
+ name = "category"
211
+ type: type[CategoricalDtypeType] = CategoricalDtypeType
212
+ kind: str_type = "O"
213
+ str = "|O08"
214
+ base = np.dtype("O")
215
+ _metadata = ("categories", "ordered")
216
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
217
+ _supports_2d = False
218
+ _can_fast_transpose = False
219
+
220
+ def __init__(self, categories=None, ordered: Ordered = False) -> None:
221
+ self._finalize(categories, ordered, fastpath=False)
222
+
223
+ @classmethod
224
+ def _from_fastpath(
225
+ cls, categories=None, ordered: bool | None = None
226
+ ) -> CategoricalDtype:
227
+ self = cls.__new__(cls)
228
+ self._finalize(categories, ordered, fastpath=True)
229
+ return self
230
+
231
+ @classmethod
232
+ def _from_categorical_dtype(
233
+ cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None
234
+ ) -> CategoricalDtype:
235
+ if categories is ordered is None:
236
+ return dtype
237
+ if categories is None:
238
+ categories = dtype.categories
239
+ if ordered is None:
240
+ ordered = dtype.ordered
241
+ return cls(categories, ordered)
242
+
243
+ @classmethod
244
+ def _from_values_or_dtype(
245
+ cls,
246
+ values=None,
247
+ categories=None,
248
+ ordered: bool | None = None,
249
+ dtype: Dtype | None = None,
250
+ ) -> CategoricalDtype:
251
+ """
252
+ Construct dtype from the input parameters used in :class:`Categorical`.
253
+
254
+ This constructor method specifically does not do the factorization
255
+ step, if that is needed to find the categories. This constructor may
256
+ therefore return ``CategoricalDtype(categories=None, ordered=None)``,
257
+ which may not be useful. Additional steps may therefore have to be
258
+ taken to create the final dtype.
259
+
260
+ The return dtype is specified from the inputs in this prioritized
261
+ order:
262
+ 1. if dtype is a CategoricalDtype, return dtype
263
+ 2. if dtype is the string 'category', create a CategoricalDtype from
264
+ the supplied categories and ordered parameters, and return that.
265
+ 3. if values is a categorical, use value.dtype, but override it with
266
+ categories and ordered if either/both of those are not None.
267
+ 4. if dtype is None and values is not a categorical, construct the
268
+ dtype from categories and ordered, even if either of those is None.
269
+
270
+ Parameters
271
+ ----------
272
+ values : list-like, optional
273
+ The list-like must be 1-dimensional.
274
+ categories : list-like, optional
275
+ Categories for the CategoricalDtype.
276
+ ordered : bool, optional
277
+ Designating if the categories are ordered.
278
+ dtype : CategoricalDtype or the string "category", optional
279
+ If ``CategoricalDtype``, cannot be used together with
280
+ `categories` or `ordered`.
281
+
282
+ Returns
283
+ -------
284
+ CategoricalDtype
285
+
286
+ Examples
287
+ --------
288
+ >>> pd.CategoricalDtype._from_values_or_dtype()
289
+ CategoricalDtype(categories=None, ordered=None, categories_dtype=None)
290
+ >>> pd.CategoricalDtype._from_values_or_dtype(
291
+ ... categories=['a', 'b'], ordered=True
292
+ ... )
293
+ CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)
294
+ >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)
295
+ >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)
296
+ >>> c = pd.Categorical([0, 1], dtype=dtype1)
297
+ >>> pd.CategoricalDtype._from_values_or_dtype(
298
+ ... c, ['x', 'y'], ordered=True, dtype=dtype2
299
+ ... )
300
+ Traceback (most recent call last):
301
+ ...
302
+ ValueError: Cannot specify `categories` or `ordered` together with
303
+ `dtype`.
304
+
305
+ The supplied dtype takes precedence over values' dtype:
306
+
307
+ >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)
308
+ CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object)
309
+ """
310
+
311
+ if dtype is not None:
312
+ # The dtype argument takes precedence over values.dtype (if any)
313
+ if isinstance(dtype, str):
314
+ if dtype == "category":
315
+ if ordered is None and cls.is_dtype(values):
316
+ # GH#49309 preserve orderedness
317
+ ordered = values.dtype.ordered
318
+
319
+ dtype = CategoricalDtype(categories, ordered)
320
+ else:
321
+ raise ValueError(f"Unknown dtype {repr(dtype)}")
322
+ elif categories is not None or ordered is not None:
323
+ raise ValueError(
324
+ "Cannot specify `categories` or `ordered` together with `dtype`."
325
+ )
326
+ elif not isinstance(dtype, CategoricalDtype):
327
+ raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}")
328
+ elif cls.is_dtype(values):
329
+ # If no "dtype" was passed, use the one from "values", but honor
330
+ # the "ordered" and "categories" arguments
331
+ dtype = values.dtype._from_categorical_dtype(
332
+ values.dtype, categories, ordered
333
+ )
334
+ else:
335
+ # If dtype=None and values is not categorical, create a new dtype.
336
+ # Note: This could potentially have categories=None and
337
+ # ordered=None.
338
+ dtype = CategoricalDtype(categories, ordered)
339
+
340
+ return cast(CategoricalDtype, dtype)
341
+
342
+ @classmethod
343
+ def construct_from_string(cls, string: str_type) -> CategoricalDtype:
344
+ """
345
+ Construct a CategoricalDtype from a string.
346
+
347
+ Parameters
348
+ ----------
349
+ string : str
350
+ Must be the string "category" in order to be successfully constructed.
351
+
352
+ Returns
353
+ -------
354
+ CategoricalDtype
355
+ Instance of the dtype.
356
+
357
+ Raises
358
+ ------
359
+ TypeError
360
+ If a CategoricalDtype cannot be constructed from the input.
361
+ """
362
+ if not isinstance(string, str):
363
+ raise TypeError(
364
+ f"'construct_from_string' expects a string, got {type(string)}"
365
+ )
366
+ if string != cls.name:
367
+ raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")
368
+
369
+ # need ordered=None to ensure that operations specifying dtype="category" don't
370
+ # override the ordered value for existing categoricals
371
+ return cls(ordered=None)
372
+
373
+ def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:
374
+ if ordered is not None:
375
+ self.validate_ordered(ordered)
376
+
377
+ if categories is not None:
378
+ categories = self.validate_categories(categories, fastpath=fastpath)
379
+
380
+ self._categories = categories
381
+ self._ordered = ordered
382
+
383
+ def __setstate__(self, state: MutableMapping[str_type, Any]) -> None:
384
+ # for pickle compat. __get_state__ is defined in the
385
+ # PandasExtensionDtype superclass and uses the public properties to
386
+ # pickle -> need to set the settable private ones here (see GH26067)
387
+ self._categories = state.pop("categories", None)
388
+ self._ordered = state.pop("ordered", False)
389
+
390
+ def __hash__(self) -> int:
391
+ # _hash_categories returns a uint64, so use the negative
392
+ # space for when we have unknown categories to avoid a conflict
393
+ if self.categories is None:
394
+ if self.ordered:
395
+ return -1
396
+ else:
397
+ return -2
398
+ # We *do* want to include the real self.ordered here
399
+ return int(self._hash_categories)
400
+
401
+ def __eq__(self, other: object) -> bool:
402
+ """
403
+ Rules for CDT equality:
404
+ 1) Any CDT is equal to the string 'category'
405
+ 2) Any CDT is equal to itself
406
+ 3) Any CDT is equal to a CDT with categories=None regardless of ordered
407
+ 4) A CDT with ordered=True is only equal to another CDT with
408
+ ordered=True and identical categories in the same order
409
+ 5) A CDT with ordered={False, None} is only equal to another CDT with
410
+ ordered={False, None} and identical categories, but same order is
411
+ not required. There is no distinction between False/None.
412
+ 6) Any other comparison returns False
413
+ """
414
+ if isinstance(other, str):
415
+ return other == self.name
416
+ elif other is self:
417
+ return True
418
+ elif not (hasattr(other, "ordered") and hasattr(other, "categories")):
419
+ return False
420
+ elif self.categories is None or other.categories is None:
421
+ # For non-fully-initialized dtypes, these are only equal to
422
+ # - the string "category" (handled above)
423
+ # - other CategoricalDtype with categories=None
424
+ return self.categories is other.categories
425
+ elif self.ordered or other.ordered:
426
+ # At least one has ordered=True; equal if both have ordered=True
427
+ # and the same values for categories in the same order.
428
+ return (self.ordered == other.ordered) and self.categories.equals(
429
+ other.categories
430
+ )
431
+ else:
432
+ # Neither has ordered=True; equal if both have the same categories,
433
+ # but same order is not necessary. There is no distinction between
434
+ # ordered=False and ordered=None: CDT(., False) and CDT(., None)
435
+ # will be equal if they have the same categories.
436
+ left = self.categories
437
+ right = other.categories
438
+
439
+ # GH#36280 the ordering of checks here is for performance
440
+ if not left.dtype == right.dtype:
441
+ return False
442
+
443
+ if len(left) != len(right):
444
+ return False
445
+
446
+ if self.categories.equals(other.categories):
447
+ # Check and see if they happen to be identical categories
448
+ return True
449
+
450
+ if left.dtype != object:
451
+ # Faster than calculating hash
452
+ indexer = left.get_indexer(right)
453
+ # Because left and right have the same length and are unique,
454
+ # `indexer` not having any -1s implies that there is a
455
+ # bijection between `left` and `right`.
456
+ return (indexer != -1).all()
457
+
458
+ # With object-dtype we need a comparison that identifies
459
+ # e.g. int(2) as distinct from float(2)
460
+ return set(left) == set(right)
461
+
462
+ def __repr__(self) -> str_type:
463
+ if self.categories is None:
464
+ data = "None"
465
+ dtype = "None"
466
+ else:
467
+ data = self.categories._format_data(name=type(self).__name__)
468
+ if isinstance(self.categories, ABCRangeIndex):
469
+ data = str(self.categories._range)
470
+ data = data.rstrip(", ")
471
+ dtype = self.categories.dtype
472
+
473
+ return (
474
+ f"CategoricalDtype(categories={data}, ordered={self.ordered}, "
475
+ f"categories_dtype={dtype})"
476
+ )
477
+
478
+ @cache_readonly
479
+ def _hash_categories(self) -> int:
480
+ from pandas.core.util.hashing import (
481
+ combine_hash_arrays,
482
+ hash_array,
483
+ hash_tuples,
484
+ )
485
+
486
+ categories = self.categories
487
+ ordered = self.ordered
488
+
489
+ if len(categories) and isinstance(categories[0], tuple):
490
+ # assumes if any individual category is a tuple, then all are. ATM
491
+ # I don't really want to support just some of the categories being
492
+ # tuples.
493
+ cat_list = list(categories) # breaks if a np.array of categories
494
+ cat_array = hash_tuples(cat_list)
495
+ else:
496
+ if categories.dtype == "O" and len({type(x) for x in categories}) != 1:
497
+ # TODO: hash_array doesn't handle mixed types. It casts
498
+ # everything to a str first, which means we treat
499
+ # {'1', '2'} the same as {'1', 2}
500
+ # find a better solution
501
+ hashed = hash((tuple(categories), ordered))
502
+ return hashed
503
+
504
+ if DatetimeTZDtype.is_dtype(categories.dtype):
505
+ # Avoid future warning.
506
+ categories = categories.view("datetime64[ns]")
507
+
508
+ cat_array = hash_array(np.asarray(categories), categorize=False)
509
+ if ordered:
510
+ cat_array = np.vstack(
511
+ [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]
512
+ )
513
+ else:
514
+ cat_array = np.array([cat_array])
515
+ combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array))
516
+ return np.bitwise_xor.reduce(combined_hashed)
517
+
518
+ @classmethod
519
+ def construct_array_type(cls) -> type_t[Categorical]:
520
+ """
521
+ Return the array type associated with this dtype.
522
+
523
+ Returns
524
+ -------
525
+ type
526
+ """
527
+ from pandas import Categorical
528
+
529
+ return Categorical
530
+
531
+ @staticmethod
532
+ def validate_ordered(ordered: Ordered) -> None:
533
+ """
534
+ Validates that we have a valid ordered parameter. If
535
+ it is not a boolean, a TypeError will be raised.
536
+
537
+ Parameters
538
+ ----------
539
+ ordered : object
540
+ The parameter to be verified.
541
+
542
+ Raises
543
+ ------
544
+ TypeError
545
+ If 'ordered' is not a boolean.
546
+ """
547
+ if not is_bool(ordered):
548
+ raise TypeError("'ordered' must either be 'True' or 'False'")
549
+
550
+ @staticmethod
551
+ def validate_categories(categories, fastpath: bool = False) -> Index:
552
+ """
553
+ Validates that we have good categories
554
+
555
+ Parameters
556
+ ----------
557
+ categories : array-like
558
+ fastpath : bool
559
+ Whether to skip nan and uniqueness checks
560
+
561
+ Returns
562
+ -------
563
+ categories : Index
564
+ """
565
+ from pandas.core.indexes.base import Index
566
+
567
+ if not fastpath and not is_list_like(categories):
568
+ raise TypeError(
569
+ f"Parameter 'categories' must be list-like, was {repr(categories)}"
570
+ )
571
+ if not isinstance(categories, ABCIndex):
572
+ categories = Index._with_infer(categories, tupleize_cols=False)
573
+
574
+ if not fastpath:
575
+ if categories.hasnans:
576
+ raise ValueError("Categorical categories cannot be null")
577
+
578
+ if not categories.is_unique:
579
+ raise ValueError("Categorical categories must be unique")
580
+
581
+ if isinstance(categories, ABCCategoricalIndex):
582
+ categories = categories.categories
583
+
584
+ return categories
585
+
586
+ def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype:
587
+ """
588
+ Returns a CategoricalDtype with categories and ordered taken from dtype
589
+ if specified, otherwise falling back to self if unspecified
590
+
591
+ Parameters
592
+ ----------
593
+ dtype : CategoricalDtype
594
+
595
+ Returns
596
+ -------
597
+ new_dtype : CategoricalDtype
598
+ """
599
+ if isinstance(dtype, str) and dtype == "category":
600
+ # dtype='category' should not change anything
601
+ return self
602
+ elif not self.is_dtype(dtype):
603
+ raise ValueError(
604
+ f"a CategoricalDtype must be passed to perform an update, "
605
+ f"got {repr(dtype)}"
606
+ )
607
+ else:
608
+ # from here on, dtype is a CategoricalDtype
609
+ dtype = cast(CategoricalDtype, dtype)
610
+
611
+ # update categories/ordered unless they've been explicitly passed as None
612
+ new_categories = (
613
+ dtype.categories if dtype.categories is not None else self.categories
614
+ )
615
+ new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered
616
+
617
+ return CategoricalDtype(new_categories, new_ordered)
618
+
619
+ @property
620
+ def categories(self) -> Index:
621
+ """
622
+ An ``Index`` containing the unique categories allowed.
623
+
624
+ Examples
625
+ --------
626
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
627
+ >>> cat_type.categories
628
+ Index(['a', 'b'], dtype='object')
629
+ """
630
+ return self._categories
631
+
632
+ @property
633
+ def ordered(self) -> Ordered:
634
+ """
635
+ Whether the categories have an ordered relationship.
636
+
637
+ Examples
638
+ --------
639
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
640
+ >>> cat_type.ordered
641
+ True
642
+
643
+ >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)
644
+ >>> cat_type.ordered
645
+ False
646
+ """
647
+ return self._ordered
648
+
649
+ @property
650
+ def _is_boolean(self) -> bool:
651
+ from pandas.core.dtypes.common import is_bool_dtype
652
+
653
+ return is_bool_dtype(self.categories)
654
+
655
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
656
+ # check if we have all categorical dtype with identical categories
657
+ if all(isinstance(x, CategoricalDtype) for x in dtypes):
658
+ first = dtypes[0]
659
+ if all(first == other for other in dtypes[1:]):
660
+ return first
661
+
662
+ # special case non-initialized categorical
663
+ # TODO we should figure out the expected return value in general
664
+ non_init_cats = [
665
+ isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes
666
+ ]
667
+ if all(non_init_cats):
668
+ return self
669
+ elif any(non_init_cats):
670
+ return None
671
+
672
+ # categorical is aware of Sparse -> extract sparse subdtypes
673
+ dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
674
+ # extract the categories' dtype
675
+ non_cat_dtypes = [
676
+ x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes
677
+ ]
678
+ # TODO should categorical always give an answer?
679
+ from pandas.core.dtypes.cast import find_common_type
680
+
681
+ return find_common_type(non_cat_dtypes)
682
+
683
+ @cache_readonly
684
+ def index_class(self) -> type_t[CategoricalIndex]:
685
+ from pandas import CategoricalIndex
686
+
687
+ return CategoricalIndex
688
+
689
+
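
A short sketch of the equality rules spelled out in `CategoricalDtype.__eq__` above, assuming behavior exactly as documented there:

    import pandas as pd

    a = pd.CategoricalDtype(["a", "b"], ordered=False)
    b = pd.CategoricalDtype(["b", "a"], ordered=False)
    c = pd.CategoricalDtype(["a", "b"], ordered=True)

    print(a == "category")             # True: any CDT equals the string 'category'
    print(a == b)                      # True: unordered comparison ignores order
    print(c == b)                      # False: ordered=True needs identical order
    print(a == pd.CategoricalDtype())  # True: categories=None matches any CDT
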
690
+ @register_extension_dtype
691
+ class DatetimeTZDtype(PandasExtensionDtype):
692
+ """
693
+ An ExtensionDtype for timezone-aware datetime data.
694
+
695
+ **This is not an actual numpy dtype**, but a duck type.
696
+
697
+ Parameters
698
+ ----------
699
+ unit : str, default "ns"
700
+ The precision of the datetime data. Currently limited
701
+ to ``"ns"``.
702
+ tz : str, int, or datetime.tzinfo
703
+ The timezone.
704
+
705
+ Attributes
706
+ ----------
707
+ unit
708
+ tz
709
+
710
+ Methods
711
+ -------
712
+ None
713
+
714
+ Raises
715
+ ------
716
+ ZoneInfoNotFoundError
717
+ When the requested timezone cannot be found.
718
+
719
+ Examples
720
+ --------
721
+ >>> from zoneinfo import ZoneInfo
722
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))
723
+ datetime64[ns, UTC]
724
+
725
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))
726
+ datetime64[ns, Europe/Paris]
727
+ """
728
+
729
+ type: type[Timestamp] = Timestamp
730
+ kind: str_type = "M"
731
+ num = 101
732
+ _metadata = ("unit", "tz")
733
+ _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
734
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
735
+ _supports_2d = True
736
+ _can_fast_transpose = True
737
+
738
+ @property
739
+ def na_value(self) -> NaTType:
740
+ return NaT
741
+
742
+ @cache_readonly
743
+ def base(self) -> DtypeObj: # type: ignore[override]
744
+ return np.dtype(f"M8[{self.unit}]")
745
+
746
+ # error: Signature of "str" incompatible with supertype "PandasExtensionDtype"
747
+ @cache_readonly
748
+ def str(self) -> str: # type: ignore[override]
749
+ return f"|M8[{self.unit}]"
750
+
751
+ def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None:
752
+ if isinstance(unit, DatetimeTZDtype):
753
+ # error: "str" has no attribute "tz"
754
+ unit, tz = unit.unit, unit.tz # type: ignore[attr-defined]
755
+
756
+ if unit != "ns":
757
+ if isinstance(unit, str) and tz is None:
758
+ # maybe a string like datetime64[ns, tz], which we support for
759
+ # now.
760
+ result = type(self).construct_from_string(unit)
761
+ unit = result.unit
762
+ tz = result.tz
763
+ msg = (
764
+ f"Passing a dtype alias like 'datetime64[ns, {tz}]' "
765
+ "to DatetimeTZDtype is no longer supported. Use "
766
+ "'DatetimeTZDtype.construct_from_string()' instead."
767
+ )
768
+ raise ValueError(msg)
769
+ if unit not in ["s", "ms", "us", "ns"]:
770
+ raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units")
771
+
772
+ if tz:
773
+ tz = timezones.maybe_get_tz(tz)
774
+ tz = timezones.tz_standardize(tz)
775
+ elif tz is not None:
776
+ raise pytz.UnknownTimeZoneError(tz)
777
+ if tz is None:
778
+ raise TypeError("A 'tz' is required.")
779
+
780
+ self._unit = unit
781
+ self._tz = tz
782
+
783
+ @cache_readonly
784
+ def _creso(self) -> int:
785
+ """
786
+ The NPY_DATETIMEUNIT corresponding to this dtype's resolution.
787
+ """
788
+ return abbrev_to_npy_unit(self.unit)
789
+
790
+ @property
791
+ def unit(self) -> str_type:
792
+ """
793
+ The precision of the datetime data.
794
+
795
+ Examples
796
+ --------
797
+ >>> from zoneinfo import ZoneInfo
798
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
799
+ >>> dtype.unit
800
+ 'ns'
801
+ """
802
+ return self._unit
803
+
804
+ @property
805
+ def tz(self) -> tzinfo:
806
+ """
807
+ The timezone.
808
+
809
+ Examples
810
+ --------
811
+ >>> from zoneinfo import ZoneInfo
812
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
813
+ >>> dtype.tz
814
+ zoneinfo.ZoneInfo(key='America/Los_Angeles')
815
+ """
816
+ return self._tz
817
+
818
+ @classmethod
819
+ def construct_array_type(cls) -> type_t[DatetimeArray]:
820
+ """
821
+ Return the array type associated with this dtype.
822
+
823
+ Returns
824
+ -------
825
+ type
826
+ """
827
+ from pandas.core.arrays import DatetimeArray
828
+
829
+ return DatetimeArray
830
+
831
+ @classmethod
832
+ def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
833
+ """
834
+ Construct a DatetimeTZDtype from a string.
835
+
836
+ Parameters
837
+ ----------
838
+ string : str
839
+ The string alias for this DatetimeTZDtype.
840
+ Should be formatted like ``datetime64[ns, <tz>]``,
841
+ where ``<tz>`` is the timezone name.
842
+
843
+ Examples
844
+ --------
845
+ >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
846
+ datetime64[ns, UTC]
847
+ """
848
+ if not isinstance(string, str):
849
+ raise TypeError(
850
+ f"'construct_from_string' expects a string, got {type(string)}"
851
+ )
852
+
853
+ msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
854
+ match = cls._match.match(string)
855
+ if match:
856
+ d = match.groupdict()
857
+ try:
858
+ return cls(unit=d["unit"], tz=d["tz"])
859
+ except (KeyError, TypeError, ValueError) as err:
860
+ # KeyError if maybe_get_tz tries and fails to get a
861
+ # pytz timezone (actually pytz.UnknownTimeZoneError).
862
+ # TypeError if we pass a nonsense tz;
863
+ # ValueError if we pass a unit other than "ns"
864
+ raise TypeError(msg) from err
865
+ raise TypeError(msg)
866
+
867
+ def __str__(self) -> str_type:
868
+ return f"datetime64[{self.unit}, {self.tz}]"
869
+
870
+ @property
871
+ def name(self) -> str_type:
872
+ """A string representation of the dtype."""
873
+ return str(self)
874
+
875
+ def __hash__(self) -> int:
876
+ # make myself hashable
877
+ # TODO: update this.
878
+ return hash(str(self))
879
+
880
+ def __eq__(self, other: object) -> bool:
881
+ if isinstance(other, str):
882
+ if other.startswith("M8["):
883
+ other = f"datetime64[{other[3:]}"
884
+ return other == self.name
885
+
886
+ return (
887
+ isinstance(other, DatetimeTZDtype)
888
+ and self.unit == other.unit
889
+ and tz_compare(self.tz, other.tz)
890
+ )
891
+
892
+ def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:
893
+ """
894
+ Construct DatetimeArray from pyarrow Array/ChunkedArray.
895
+
896
+ Note: If the units in the pyarrow Array are the same as this
897
+ DatetimeDtype, then values corresponding to the integer representation
898
+ of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`)
899
+ are converted to ``NaT``, regardless of the null indicator in the
900
+ pyarrow array.
901
+
902
+ Parameters
903
+ ----------
904
+ array : pyarrow.Array or pyarrow.ChunkedArray
905
+ The Arrow array to convert to DatetimeArray.
906
+
907
+ Returns
908
+ -------
909
+ extension array : DatetimeArray
910
+ """
911
+ import pyarrow
912
+
913
+ from pandas.core.arrays import DatetimeArray
914
+
915
+ array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)
916
+
917
+ if isinstance(array, pyarrow.Array):
918
+ np_arr = array.to_numpy(zero_copy_only=False)
919
+ else:
920
+ np_arr = array.to_numpy()
921
+
922
+ return DatetimeArray._simple_new(np_arr, dtype=self)
923
+
924
+ def __setstate__(self, state) -> None:
925
+ # for pickle compat. __get_state__ is defined in the
926
+ # PandasExtensionDtype superclass and uses the public properties to
927
+ # pickle -> need to set the settable private ones here (see GH26067)
928
+ self._tz = state["tz"]
929
+ self._unit = state["unit"]
930
+
931
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
932
+ if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes):
933
+ np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]])
934
+ unit = np.datetime_data(np_dtype)[0]
935
+ return type(self)(unit=unit, tz=self.tz)
936
+ return super()._get_common_dtype(dtypes)
937
+
938
+ @cache_readonly
939
+ def index_class(self) -> type_t[DatetimeIndex]:
940
+ from pandas import DatetimeIndex
941
+
942
+ return DatetimeIndex
943
+
944
+
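
A brief sketch of `DatetimeTZDtype` string construction and the "M8[...]" alias handling in `__eq__` above:

    import pandas as pd

    dtype = pd.DatetimeTZDtype.construct_from_string("datetime64[ns, UTC]")
    print(dtype.unit, dtype.tz)  # ns UTC

    # __eq__ rewrites a leading "M8[" to "datetime64[" before comparing.
    print(dtype == "M8[ns, UTC]")          # True
    print(dtype == "datetime64[ns, UTC]")  # True
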
945
+ @register_extension_dtype
946
+ class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype):
947
+ """
948
+ An ExtensionDtype for Period data.
949
+
950
+ **This is not an actual numpy dtype**, but a duck type.
951
+
952
+ Parameters
953
+ ----------
954
+ freq : str or DateOffset
955
+ The frequency of this PeriodDtype.
956
+
957
+ Attributes
958
+ ----------
959
+ freq
960
+
961
+ Methods
962
+ -------
963
+ None
964
+
965
+ Examples
966
+ --------
967
+ >>> pd.PeriodDtype(freq='D')
968
+ period[D]
969
+
970
+ >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
971
+ period[M]
972
+ """
973
+
974
+ type: type[Period] = Period
975
+ kind: str_type = "O"
976
+ str = "|O08"
977
+ base = np.dtype("O")
978
+ num = 102
979
+ _metadata = ("freq",)
980
+ _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
981
+ # error: Incompatible types in assignment (expression has type
982
+ # "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype"
983
+ # defined the type as "Dict[str, PandasExtensionDtype]") [assignment]
984
+ _cache_dtypes: dict[BaseOffset, int] = {} # type: ignore[assignment]
985
+ __hash__ = PeriodDtypeBase.__hash__
986
+ _freq: BaseOffset
987
+ _supports_2d = True
988
+ _can_fast_transpose = True
989
+
990
+ def __new__(cls, freq) -> PeriodDtype: # noqa: PYI034
991
+ """
992
+ Parameters
993
+ ----------
994
+ freq : PeriodDtype, BaseOffset, or string
995
+ """
996
+ if isinstance(freq, PeriodDtype):
997
+ return freq
998
+
999
+ if not isinstance(freq, BaseOffset):
1000
+ freq = cls._parse_dtype_strict(freq)
1001
+
1002
+ if isinstance(freq, BDay):
1003
+ # GH#53446
1004
+ # TODO(3.0): enforcing this will close GH#10575
1005
+ warnings.warn(
1006
+ "PeriodDtype[B] is deprecated and will be removed in a future "
1007
+ "version. Use a DatetimeIndex with freq='B' instead",
1008
+ FutureWarning,
1009
+ stacklevel=find_stack_level(),
1010
+ )
1011
+
1012
+ try:
1013
+ dtype_code = cls._cache_dtypes[freq]
1014
+ except KeyError:
1015
+ dtype_code = freq._period_dtype_code
1016
+ cls._cache_dtypes[freq] = dtype_code
1017
+ u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n)
1018
+ u._freq = freq
1019
+ return u
1020
+
1021
+ def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]:
1022
+ return type(self), (self.name,)
1023
+
1024
+ @property
1025
+ def freq(self) -> BaseOffset:
1026
+ """
1027
+ The frequency object of this PeriodDtype.
1028
+
1029
+ Examples
1030
+ --------
1031
+ >>> dtype = pd.PeriodDtype(freq='D')
1032
+ >>> dtype.freq
1033
+ <Day>
1034
+ """
1035
+ return self._freq
1036
+
1037
+ @classmethod
1038
+ def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
1039
+ if isinstance(freq, str): # note: freq is already of type str!
1040
+ if freq.startswith(("Period[", "period[")):
1041
+ m = cls._match.search(freq)
1042
+ if m is not None:
1043
+ freq = m.group("freq")
1044
+
1045
+ freq_offset = to_offset(freq, is_period=True)
1046
+ if freq_offset is not None:
1047
+ return freq_offset
1048
+
1049
+ raise TypeError(
1050
+ "PeriodDtype argument should be string or BaseOffset, "
1051
+ f"got {type(freq).__name__}"
1052
+ )
1053
+
1054
+ @classmethod
1055
+ def construct_from_string(cls, string: str_type) -> PeriodDtype:
1056
+ """
1057
+ Strict construction from a string, raise a TypeError if not
1058
+ possible
1059
+ """
1060
+ if (
1061
+ isinstance(string, str)
1062
+ and (string.startswith(("period[", "Period[")))
1063
+ or isinstance(string, BaseOffset)
1064
+ ):
1065
+ # do not parse string like U as period[U]
1066
+ # avoid tuple to be regarded as freq
1067
+ try:
1068
+ return cls(freq=string)
1069
+ except ValueError:
1070
+ pass
1071
+ if isinstance(string, str):
1072
+ msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
1073
+ else:
1074
+ msg = f"'construct_from_string' expects a string, got {type(string)}"
1075
+ raise TypeError(msg)
1076
+
1077
+ def __str__(self) -> str_type:
1078
+ return self.name
1079
+
1080
+ @property
1081
+ def name(self) -> str_type:
1082
+ return f"period[{self._freqstr}]"
1083
+
1084
+ @property
1085
+ def na_value(self) -> NaTType:
1086
+ return NaT
1087
+
1088
+ def __eq__(self, other: object) -> bool:
1089
+ if isinstance(other, str):
1090
+ return other in [self.name, capitalize_first_letter(self.name)]
1091
+
1092
+ return super().__eq__(other)
1093
+
1094
+ def __ne__(self, other: object) -> bool:
1095
+ return not self.__eq__(other)
1096
+
1097
+ @classmethod
1098
+ def is_dtype(cls, dtype: object) -> bool:
1099
+ """
1100
+ Return a boolean indicating whether the passed type is an actual dtype
1101
+ that we can match (via string or type).
1102
+ """
1103
+ if isinstance(dtype, str):
1104
+ # PeriodDtype can be instantiated from freq string like "U",
1105
+ # but doesn't regard freq str like "U" as dtype.
1106
+ if dtype.startswith(("period[", "Period[")):
1107
+ try:
1108
+ return cls._parse_dtype_strict(dtype) is not None
1109
+ except ValueError:
1110
+ return False
1111
+ else:
1112
+ return False
1113
+ return super().is_dtype(dtype)
1114
+
1115
+ @classmethod
1116
+ def construct_array_type(cls) -> type_t[PeriodArray]:
1117
+ """
1118
+ Return the array type associated with this dtype.
1119
+
1120
+ Returns
1121
+ -------
1122
+ type
1123
+ """
1124
+ from pandas.core.arrays import PeriodArray
1125
+
1126
+ return PeriodArray
1127
+
1128
+ def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:
1129
+ """
1130
+ Construct PeriodArray from pyarrow Array/ChunkedArray.
1131
+ """
1132
+ import pyarrow
1133
+
1134
+ from pandas.core.arrays import PeriodArray
1135
+ from pandas.core.arrays.arrow._arrow_utils import (
1136
+ pyarrow_array_to_numpy_and_mask,
1137
+ )
1138
+
1139
+ if isinstance(array, pyarrow.Array):
1140
+ chunks = [array]
1141
+ else:
1142
+ chunks = array.chunks
1143
+
1144
+ results = []
1145
+ for arr in chunks:
1146
+ data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))
1147
+ parr = PeriodArray(data.copy(), dtype=self, copy=False)
1148
+ # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";
1149
+ # expected type "Union[int, Sequence[int], Sequence[bool], slice]"
1150
+ parr[~mask] = NaT # type: ignore[index]
1151
+ results.append(parr)
1152
+
1153
+ if not results:
1154
+ return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False)
1155
+ return PeriodArray._concat_same_type(results)
1156
+
1157
+ @cache_readonly
1158
+ def index_class(self) -> type_t[PeriodIndex]:
1159
+ from pandas import PeriodIndex
1160
+
1161
+ return PeriodIndex
1162
+
1163
+
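
A small sketch of the string handling above: `construct_from_string` only accepts the bracketed spelling, and `is_dtype` deliberately does not treat a bare frequency alias as a period dtype:

    import pandas as pd

    dtype = pd.PeriodDtype.construct_from_string("period[D]")
    print(dtype)                 # period[D]
    print(dtype == "Period[D]")  # True: __eq__ accepts the capitalized alias

    print(pd.PeriodDtype.is_dtype("period[D]"))  # True
    print(pd.PeriodDtype.is_dtype("D"))          # False: bare alias is not a dtype
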
1164
+ @register_extension_dtype
1165
+ class IntervalDtype(PandasExtensionDtype):
1166
+ """
1167
+ An ExtensionDtype for Interval data.
1168
+
1169
+ **This is not an actual numpy dtype**, but a duck type.
1170
+
1171
+ Parameters
1172
+ ----------
1173
+ subtype : str, np.dtype
1174
+ The dtype of the Interval bounds.
1175
+
1176
+ Attributes
1177
+ ----------
1178
+ subtype
1179
+
1180
+ Methods
1181
+ -------
1182
+ None
1183
+
1184
+ Examples
1185
+ --------
1186
+ >>> pd.IntervalDtype(subtype='int64', closed='both')
1187
+ interval[int64, both]
1188
+ """
1189
+
1190
+ name = "interval"
1191
+ kind: str_type = "O"
1192
+ str = "|O08"
1193
+ base = np.dtype("O")
1194
+ num = 103
1195
+ _metadata = (
1196
+ "subtype",
1197
+ "closed",
1198
+ )
1199
+
1200
+ _match = re.compile(
1201
+ r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)"
1202
+ r"(, (?P<closed>(right|left|both|neither)))?\]"
1203
+ )
1204
+
1205
+ _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
1206
+ _subtype: None | np.dtype
1207
+ _closed: IntervalClosedType | None
1208
+
1209
+ def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:
1210
+ from pandas.core.dtypes.common import (
1211
+ is_string_dtype,
1212
+ pandas_dtype,
1213
+ )
1214
+
1215
+ if closed is not None and closed not in {"right", "left", "both", "neither"}:
1216
+ raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")
1217
+
1218
+ if isinstance(subtype, IntervalDtype):
1219
+ if closed is not None and closed != subtype.closed:
1220
+ raise ValueError(
1221
+ "dtype.closed and 'closed' do not match. "
1222
+ "Try IntervalDtype(dtype.subtype, closed) instead."
1223
+ )
1224
+ self._subtype = subtype._subtype
1225
+ self._closed = subtype._closed
1226
+ elif subtype is None:
1227
+ # we are called as an empty constructor
1228
+ # generally for pickle compat
1229
+ self._subtype = None
1230
+ self._closed = closed
1231
+ elif isinstance(subtype, str) and subtype.lower() == "interval":
1232
+ self._subtype = None
1233
+ self._closed = closed
1234
+ else:
1235
+ if isinstance(subtype, str):
1236
+ m = IntervalDtype._match.search(subtype)
1237
+ if m is not None:
1238
+ gd = m.groupdict()
1239
+ subtype = gd["subtype"]
1240
+ if gd.get("closed", None) is not None:
1241
+ if closed is not None:
1242
+ if closed != gd["closed"]:
1243
+ raise ValueError(
1244
+ "'closed' keyword does not match value "
1245
+ "specified in dtype string"
1246
+ )
1247
+ closed = gd["closed"] # type: ignore[assignment]
1248
+
1249
+ try:
1250
+ subtype = pandas_dtype(subtype)
1251
+ except TypeError as err:
1252
+ raise TypeError("could not construct IntervalDtype") from err
1253
+ if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
1254
+ # GH 19016
1255
+ msg = (
1256
+ "category, object, and string subtypes are not supported "
1257
+ "for IntervalDtype"
1258
+ )
1259
+ raise TypeError(msg)
1260
+ self._subtype = subtype
1261
+ self._closed = closed
1262
+
1263
+ @cache_readonly
1264
+ def _can_hold_na(self) -> bool:
1265
+ subtype = self._subtype
1266
+ if subtype is None:
1267
+ # partially-initialized
1268
+ raise NotImplementedError(
1269
+ "_can_hold_na is not defined for partially-initialized IntervalDtype"
1270
+ )
1271
+ if subtype.kind in "iu":
1272
+ return False
1273
+ return True
1274
+
1275
+ @property
1276
+ def closed(self) -> IntervalClosedType:
1277
+ return self._closed # type: ignore[return-value]
1278
+
1279
+ @property
1280
+ def subtype(self):
1281
+ """
1282
+ The dtype of the Interval bounds.
1283
+
1284
+ Examples
1285
+ --------
1286
+ >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
1287
+ >>> dtype.subtype
1288
+ dtype('int64')
1289
+ """
1290
+ return self._subtype
1291
+
1292
+ @classmethod
1293
+ def construct_array_type(cls) -> type[IntervalArray]:
1294
+ """
1295
+ Return the array type associated with this dtype.
1296
+
1297
+ Returns
1298
+ -------
1299
+ type
1300
+ """
1301
+ from pandas.core.arrays import IntervalArray
1302
+
1303
+ return IntervalArray
1304
+
1305
+ @classmethod
1306
+ def construct_from_string(cls, string: str_type) -> IntervalDtype:
1307
+ """
1308
+ Attempt to construct this type from a string; raise a TypeError
1309
+ if it is not possible.
1310
+ """
1311
+ if not isinstance(string, str):
1312
+ raise TypeError(
1313
+ f"'construct_from_string' expects a string, got {type(string)}"
1314
+ )
1315
+
1316
+ if string.lower() == "interval" or cls._match.search(string) is not None:
1317
+ return cls(string)
1318
+
1319
+ msg = (
1320
+ f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n"
1321
+ "Incorrectly formatted string passed to constructor. "
1322
+ "Valid formats include Interval or Interval[dtype] "
1323
+ "where dtype is numeric, datetime, or timedelta"
1324
+ )
1325
+ raise TypeError(msg)
1326
+
1327
+ @property
1328
+ def type(self) -> type[Interval]:
1329
+ return Interval
1330
+
1331
+ def __str__(self) -> str_type:
1332
+ if self.subtype is None:
1333
+ return "interval"
1334
+ if self.closed is None:
1335
+ # Only partially initialized GH#38394
1336
+ return f"interval[{self.subtype}]"
1337
+ return f"interval[{self.subtype}, {self.closed}]"
1338
+
1339
+ def __hash__(self) -> int:
1340
+ # make myself hashable
1341
+ return hash(str(self))
1342
+
1343
+ def __eq__(self, other: object) -> bool:
1344
+ if isinstance(other, str):
1345
+ return other.lower() in (self.name.lower(), str(self).lower())
1346
+ elif not isinstance(other, IntervalDtype):
1347
+ return False
1348
+ elif self.subtype is None or other.subtype is None:
1349
+ # None should match any subtype
1350
+ return True
1351
+ elif self.closed != other.closed:
1352
+ return False
1353
+ else:
1354
+ return self.subtype == other.subtype
1355
+
1356
+ def __setstate__(self, state) -> None:
1357
+ # for pickle compat. __get_state__ is defined in the
1358
+ # PandasExtensionDtype superclass and uses the public properties to
1359
+ # pickle -> need to set the settable private ones here (see GH26067)
1360
+ self._subtype = state["subtype"]
1361
+
1362
+ # backward-compat older pickles won't have "closed" key
1363
+ self._closed = state.pop("closed", None)
1364
+
1365
+ @classmethod
1366
+ def is_dtype(cls, dtype: object) -> bool:
1367
+ """
1368
+ Return a boolean indicating whether the passed type is an actual dtype
1369
+ that we can match (via string or type).
1370
+ """
1371
+ if isinstance(dtype, str):
1372
+ if dtype.lower().startswith("interval"):
1373
+ try:
1374
+ return cls.construct_from_string(dtype) is not None
1375
+ except (ValueError, TypeError):
1376
+ return False
1377
+ else:
1378
+ return False
1379
+ return super().is_dtype(dtype)
1380
+
1381
+ def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
1382
+ """
1383
+ Construct IntervalArray from pyarrow Array/ChunkedArray.
1384
+ """
1385
+ import pyarrow
1386
+
1387
+ from pandas.core.arrays import IntervalArray
1388
+
1389
+ if isinstance(array, pyarrow.Array):
1390
+ chunks = [array]
1391
+ else:
1392
+ chunks = array.chunks
1393
+
1394
+ results = []
1395
+ for arr in chunks:
1396
+ if isinstance(arr, pyarrow.ExtensionArray):
1397
+ arr = arr.storage
1398
+ left = np.asarray(arr.field("left"), dtype=self.subtype)
1399
+ right = np.asarray(arr.field("right"), dtype=self.subtype)
1400
+ iarr = IntervalArray.from_arrays(left, right, closed=self.closed)
1401
+ results.append(iarr)
1402
+
1403
+ if not results:
1404
+ return IntervalArray.from_arrays(
1405
+ np.array([], dtype=self.subtype),
1406
+ np.array([], dtype=self.subtype),
1407
+ closed=self.closed,
1408
+ )
1409
+ return IntervalArray._concat_same_type(results)
1410
+
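A minimal sketch of what __from_arrow__ consumes: a pyarrow struct array with "left"/"right" children, matching the field() calls above (assumes pyarrow is installed; values are illustrative):

    import pyarrow as pa
    import pandas as pd

    # the storage layout expected above: a struct with "left"/"right" fields
    storage = pa.StructArray.from_arrays(
        [pa.array([0, 1]), pa.array([1, 2])], names=["left", "right"]
    )
    dtype = pd.IntervalDtype("int64", closed="right")
    arr = dtype.__from_arrow__(storage)  # IntervalArray: (0, 1], (1, 2]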
1411
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
1412
+ if not all(isinstance(x, IntervalDtype) for x in dtypes):
1413
+ return None
1414
+
1415
+ closed = cast("IntervalDtype", dtypes[0]).closed
1416
+ if not all(cast("IntervalDtype", x).closed == closed for x in dtypes):
1417
+ return np.dtype(object)
1418
+
1419
+ from pandas.core.dtypes.cast import find_common_type
1420
+
1421
+ common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes])
1422
+ if common == object:
1423
+ return np.dtype(object)
1424
+ return IntervalDtype(common, closed=closed)
1425
+
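A sketch of the two outcomes above: subtypes are promoted, but disagreeing closed sides fall back to object (illustrative, pandas 2.x; uses the private method directly):

    >>> from pandas import IntervalDtype
    >>> left = IntervalDtype("int64", closed="left")
    >>> left._get_common_dtype([left, IntervalDtype("float64", closed="left")])
    interval[float64, left]
    >>> left._get_common_dtype([left, IntervalDtype("int64", closed="right")])
    dtype('O')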
1426
+ @cache_readonly
1427
+ def index_class(self) -> type_t[IntervalIndex]:
1428
+ from pandas import IntervalIndex
1429
+
1430
+ return IntervalIndex
1431
+
1432
+
1433
+ class NumpyEADtype(ExtensionDtype):
1434
+ """
1435
+ A Pandas ExtensionDtype for NumPy dtypes.
1436
+
1437
+ This is mostly for internal compatibility, and is not especially
1438
+ useful on its own.
1439
+
1440
+ Parameters
1441
+ ----------
1442
+ dtype : object
1443
+ Object to be converted to a NumPy data type object.
1444
+
1445
+ See Also
1446
+ --------
1447
+ numpy.dtype
1448
+ """
1449
+
1450
+ _metadata = ("_dtype",)
1451
+ _supports_2d = False
1452
+ _can_fast_transpose = False
1453
+
1454
+ def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None:
1455
+ if isinstance(dtype, NumpyEADtype):
1456
+ # make constructor idempotent
1457
+ dtype = dtype.numpy_dtype
1458
+ self._dtype = np.dtype(dtype)
1459
+
1460
+ def __repr__(self) -> str:
1461
+ return f"NumpyEADtype({repr(self.name)})"
1462
+
1463
+ @property
1464
+ def numpy_dtype(self) -> np.dtype:
1465
+ """
1466
+ The NumPy dtype this NumpyEADtype wraps.
1467
+ """
1468
+ return self._dtype
1469
+
1470
+ @property
1471
+ def name(self) -> str:
1472
+ """
1473
+ A bit-width name for this data-type.
1474
+ """
1475
+ return self._dtype.name
1476
+
1477
+ @property
1478
+ def type(self) -> type[np.generic]:
1479
+ """
1480
+ The type object used to instantiate a scalar of this NumPy data-type.
1481
+ """
1482
+ return self._dtype.type
1483
+
1484
+ @property
1485
+ def _is_numeric(self) -> bool:
1486
+ # exclude object, str, unicode, void.
1487
+ return self.kind in set("biufc")
1488
+
1489
+ @property
1490
+ def _is_boolean(self) -> bool:
1491
+ return self.kind == "b"
1492
+
1493
+ @classmethod
1494
+ def construct_from_string(cls, string: str) -> NumpyEADtype:
1495
+ try:
1496
+ dtype = np.dtype(string)
1497
+ except TypeError as err:
1498
+ if not isinstance(string, str):
1499
+ msg = f"'construct_from_string' expects a string, got {type(string)}"
1500
+ else:
1501
+ msg = f"Cannot construct a 'NumpyEADtype' from '{string}'"
1502
+ raise TypeError(msg) from err
1503
+ return cls(dtype)
1504
+
1505
+ @classmethod
1506
+ def construct_array_type(cls) -> type_t[NumpyExtensionArray]:
1507
+ """
1508
+ Return the array type associated with this dtype.
1509
+
1510
+ Returns
1511
+ -------
1512
+ type
1513
+ """
1514
+ from pandas.core.arrays import NumpyExtensionArray
1515
+
1516
+ return NumpyExtensionArray
1517
+
1518
+ @property
1519
+ def kind(self) -> str:
1520
+ """
1521
+ A character code (one of 'biufcmMOSUV') identifying the general kind of data.
1522
+ """
1523
+ return self._dtype.kind
1524
+
1525
+ @property
1526
+ def itemsize(self) -> int:
1527
+ """
1528
+ The element size of this data-type object.
1529
+ """
1530
+ return self._dtype.itemsize
1531
+
1532
+
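Since NumpyEADtype is a thin proxy, its properties simply forward to the wrapped numpy dtype; a short illustrative sketch:

    >>> from pandas.core.dtypes.dtypes import NumpyEADtype
    >>> dt = NumpyEADtype("float64")
    >>> dt.kind, dt.itemsize, dt._is_numeric
    ('f', 8, True)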
1533
+ class BaseMaskedDtype(ExtensionDtype):
1534
+ """
1535
+ Base class for dtypes for BaseMaskedArray subclasses.
1536
+ """
1537
+
1538
+ base = None
1539
+ type: type
1540
+
1541
+ @property
1542
+ def na_value(self) -> libmissing.NAType:
1543
+ return libmissing.NA
1544
+
1545
+ @cache_readonly
1546
+ def numpy_dtype(self) -> np.dtype:
1547
+ """Return an instance of our numpy dtype"""
1548
+ return np.dtype(self.type)
1549
+
1550
+ @cache_readonly
1551
+ def kind(self) -> str:
1552
+ return self.numpy_dtype.kind
1553
+
1554
+ @cache_readonly
1555
+ def itemsize(self) -> int:
1556
+ """Return the number of bytes in this dtype"""
1557
+ return self.numpy_dtype.itemsize
1558
+
1559
+ @classmethod
1560
+ def construct_array_type(cls) -> type_t[BaseMaskedArray]:
1561
+ """
1562
+ Return the array type associated with this dtype.
1563
+
1564
+ Returns
1565
+ -------
1566
+ type
1567
+ """
1568
+ raise NotImplementedError
1569
+
1570
+ @classmethod
1571
+ def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype:
1572
+ """
1573
+ Construct the MaskedDtype corresponding to the given numpy dtype.
1574
+ """
1575
+ if dtype.kind == "b":
1576
+ from pandas.core.arrays.boolean import BooleanDtype
1577
+
1578
+ return BooleanDtype()
1579
+ elif dtype.kind in "iu":
1580
+ from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE
1581
+
1582
+ return NUMPY_INT_TO_DTYPE[dtype]
1583
+ elif dtype.kind == "f":
1584
+ from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE
1585
+
1586
+ return NUMPY_FLOAT_TO_DTYPE[dtype]
1587
+ else:
1588
+ raise NotImplementedError(dtype)
1589
+
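The mapping above covers exactly the bool/integer/float kinds; everything else raises. A hedged sketch:

    >>> import numpy as np
    >>> from pandas.core.dtypes.dtypes import BaseMaskedDtype
    >>> BaseMaskedDtype.from_numpy_dtype(np.dtype("uint8"))
    UInt8Dtype()
    >>> BaseMaskedDtype.from_numpy_dtype(np.dtype("M8[ns]"))   # raises NotImplementedError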
1590
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
1591
+ # We unwrap any masked dtypes, find the common dtype we would use
1592
+ # for that, then re-mask the result.
1593
+ from pandas.core.dtypes.cast import find_common_type
1594
+
1595
+ new_dtype = find_common_type(
1596
+ [
1597
+ dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype
1598
+ for dtype in dtypes
1599
+ ]
1600
+ )
1601
+ if not isinstance(new_dtype, np.dtype):
1602
+ # If we ever support e.g. Masked[DatetimeArray] then this will change
1603
+ return None
1604
+ try:
1605
+ return type(self).from_numpy_dtype(new_dtype)
1606
+ except (KeyError, NotImplementedError):
1607
+ return None
1608
+
1609
+
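The unwrap / promote / re-mask dance above in one line (illustrative; calls the private _get_common_dtype directly):

    >>> import pandas as pd
    >>> pd.Int64Dtype()._get_common_dtype([pd.Int64Dtype(), pd.Float32Dtype()])
    Float64Dtype()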
1610
+ @register_extension_dtype
1611
+ class SparseDtype(ExtensionDtype):
1612
+ """
1613
+ Dtype for data stored in :class:`SparseArray`.
1614
+
1615
+ This dtype implements the pandas ExtensionDtype interface.
1616
+
1617
+ Parameters
1618
+ ----------
1619
+ dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
1620
+ The dtype of the underlying array storing the non-fill value values.
1621
+ fill_value : scalar, optional
1622
+ The scalar value not stored in the SparseArray. By default, this
1623
+ depends on `dtype`.
1624
+
1625
+ =========== ==========
1626
+ dtype na_value
1627
+ =========== ==========
1628
+ float ``np.nan``
1629
+ int ``0``
1630
+ bool ``False``
1631
+ datetime64 ``pd.NaT``
1632
+ timedelta64 ``pd.NaT``
1633
+ =========== ==========
1634
+
1635
+ The default value may be overridden by specifying a `fill_value`.
1636
+
1637
+ Attributes
1638
+ ----------
1639
+ None
1640
+
1641
+ Methods
1642
+ -------
1643
+ None
1644
+
1645
+ Examples
1646
+ --------
1647
+ >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0))
1648
+ >>> ser
1649
+ 0 1
1650
+ 1 0
1651
+ 2 0
1652
+ dtype: Sparse[int64, 0]
1653
+ >>> ser.sparse.density
1654
+ 0.3333333333333333
1655
+ """
1656
+
1657
+ _is_immutable = True
1658
+
1659
+ # We include `_is_na_fill_value` in the metadata to avoid hash collisions
1660
+ # between SparseDtype(float, 0.0) and SparseDtype(float, nan).
1661
+ # Without _is_na_fill_value in the comparison, those would be equal since
1662
+ # hash(nan) is (sometimes?) 0.
1663
+ _metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
1664
+
1665
+ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
1666
+ if isinstance(dtype, type(self)):
1667
+ if fill_value is None:
1668
+ fill_value = dtype.fill_value
1669
+ dtype = dtype.subtype
1670
+
1671
+ from pandas.core.dtypes.common import (
1672
+ is_string_dtype,
1673
+ pandas_dtype,
1674
+ )
1675
+ from pandas.core.dtypes.missing import na_value_for_dtype
1676
+
1677
+ dtype = pandas_dtype(dtype)
1678
+ if is_string_dtype(dtype):
1679
+ dtype = np.dtype("object")
1680
+ if not isinstance(dtype, np.dtype):
1681
+ # GH#53160
1682
+ raise TypeError("SparseDtype subtype must be a numpy dtype")
1683
+
1684
+ if fill_value is None:
1685
+ fill_value = na_value_for_dtype(dtype)
1686
+
1687
+ self._dtype = dtype
1688
+ self._fill_value = fill_value
1689
+ self._check_fill_value()
1690
+
1691
+ def __hash__(self) -> int:
1692
+ # Python3 doesn't inherit __hash__ when a base class overrides
1693
+ # __eq__, so we explicitly do it here.
1694
+ return super().__hash__()
1695
+
1696
+ def __eq__(self, other: object) -> bool:
1697
+ # We have to override __eq__ to handle NA values in _metadata.
1698
+ # The base class does simple == checks, which fail for NA.
1699
+ if isinstance(other, str):
1700
+ try:
1701
+ other = self.construct_from_string(other)
1702
+ except TypeError:
1703
+ return False
1704
+
1705
+ if isinstance(other, type(self)):
1706
+ subtype = self.subtype == other.subtype
1707
+ if self._is_na_fill_value:
1708
+ # this case is complicated by two things:
1709
+ # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
1710
+ # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
1711
+ # i.e. we want to treat any floating-point NaN as equal, but
1712
+ # not a floating-point NaN and a datetime NaT.
1713
+ fill_value = (
1714
+ other._is_na_fill_value
1715
+ and isinstance(self.fill_value, type(other.fill_value))
1716
+ or isinstance(other.fill_value, type(self.fill_value))
1717
+ )
1718
+ else:
1719
+ with warnings.catch_warnings():
1720
+ # Ignore spurious numpy warning
1721
+ warnings.filterwarnings(
1722
+ "ignore",
1723
+ "elementwise comparison failed",
1724
+ category=DeprecationWarning,
1725
+ )
1726
+
1727
+ fill_value = self.fill_value == other.fill_value
1728
+
1729
+ return subtype and fill_value
1730
+ return False
1731
+
1732
+ @property
1733
+ def fill_value(self):
1734
+ """
1735
+ The fill value of the array.
1736
+
1737
+ Converting the SparseArray to a dense ndarray will fill the
1738
+ array with this value.
1739
+
1740
+ .. warning::
1741
+
1742
+ It's possible to end up with a SparseArray that has ``fill_value``
1743
+ values in ``sp_values``. This can occur, for example, when setting
1744
+ ``SparseArray.fill_value`` directly.
1745
+ """
1746
+ return self._fill_value
1747
+
1748
+ def _check_fill_value(self) -> None:
1749
+ if not lib.is_scalar(self._fill_value):
1750
+ raise ValueError(
1751
+ f"fill_value must be a scalar. Got {self._fill_value} instead"
1752
+ )
1753
+
1754
+ from pandas.core.dtypes.cast import can_hold_element
1755
+ from pandas.core.dtypes.missing import (
1756
+ is_valid_na_for_dtype,
1757
+ isna,
1758
+ )
1759
+
1760
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
1761
+
1762
+ # GH#23124 require fill_value and subtype to match
1763
+ val = self._fill_value
1764
+ if isna(val):
1765
+ if not is_valid_na_for_dtype(val, self.subtype):
1766
+ warnings.warn(
1767
+ "Allowing arbitrary scalar fill_value in SparseDtype is "
1768
+ "deprecated. In a future version, the fill_value must be "
1769
+ "a valid value for the SparseDtype.subtype.",
1770
+ FutureWarning,
1771
+ stacklevel=find_stack_level(),
1772
+ )
1773
+ else:
1774
+ dummy = np.empty(0, dtype=self.subtype)
1775
+ dummy = ensure_wrapped_if_datetimelike(dummy)
1776
+
1777
+ if not can_hold_element(dummy, val):
1778
+ warnings.warn(
1779
+ "Allowing arbitrary scalar fill_value in SparseDtype is "
1780
+ "deprecated. In a future version, the fill_value must be "
1781
+ "a valid value for the SparseDtype.subtype.",
1782
+ FutureWarning,
1783
+ stacklevel=find_stack_level(),
1784
+ )
1785
+
1786
+ @property
1787
+ def _is_na_fill_value(self) -> bool:
1788
+ from pandas import isna
1789
+
1790
+ return isna(self.fill_value)
1791
+
1792
+ @property
1793
+ def _is_numeric(self) -> bool:
1794
+ return not self.subtype == object
1795
+
1796
+ @property
1797
+ def _is_boolean(self) -> bool:
1798
+ return self.subtype.kind == "b"
1799
+
1800
+ @property
1801
+ def kind(self) -> str:
1802
+ """
1803
+ The kind character of the underlying subtype (e.g. 'i', 'f', 'b').
1804
+ """
1805
+ return self.subtype.kind
1806
+
1807
+ @property
1808
+ def type(self):
1809
+ return self.subtype.type
1810
+
1811
+ @property
1812
+ def subtype(self):
1813
+ return self._dtype
1814
+
1815
+ @property
1816
+ def name(self) -> str:
1817
+ return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"
1818
+
1819
+ def __repr__(self) -> str:
1820
+ return self.name
1821
+
1822
+ @classmethod
1823
+ def construct_array_type(cls) -> type_t[SparseArray]:
1824
+ """
1825
+ Return the array type associated with this dtype.
1826
+
1827
+ Returns
1828
+ -------
1829
+ type
1830
+ """
1831
+ from pandas.core.arrays.sparse.array import SparseArray
1832
+
1833
+ return SparseArray
1834
+
1835
+ @classmethod
1836
+ def construct_from_string(cls, string: str) -> SparseDtype:
1837
+ """
1838
+ Construct a SparseDtype from a string form.
1839
+
1840
+ Parameters
1841
+ ----------
1842
+ string : str
1843
+ Can take the following forms.
1844
+
1845
+ string dtype
1846
+ ================ ============================
1847
+ 'int' SparseDtype[np.int64, 0]
1848
+ 'Sparse' SparseDtype[np.float64, nan]
1849
+ 'Sparse[int]' SparseDtype[np.int64, 0]
1850
+ 'Sparse[int, 0]' SparseDtype[np.int64, 0]
1851
+ ================ ============================
1852
+
1853
+ It is not possible to specify non-default fill values
1854
+ with a string. An argument like ``'Sparse[int, 1]'``
1855
+ will raise a ``TypeError`` because the default fill value
1856
+ for integers is 0.
1857
+
1858
+ Returns
1859
+ -------
1860
+ SparseDtype
1861
+ """
1862
+ if not isinstance(string, str):
1863
+ raise TypeError(
1864
+ f"'construct_from_string' expects a string, got {type(string)}"
1865
+ )
1866
+ msg = f"Cannot construct a 'SparseDtype' from '{string}'"
1867
+ if string.startswith("Sparse"):
1868
+ try:
1869
+ sub_type, has_fill_value = cls._parse_subtype(string)
1870
+ except ValueError as err:
1871
+ raise TypeError(msg) from err
1872
+ else:
1873
+ result = SparseDtype(sub_type)
1874
+ msg = (
1875
+ f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
1876
+ "looks like the fill_value in the string is not "
1877
+ "the default for the dtype. Non-default fill_values "
1878
+ "are not supported. Use the 'SparseDtype()' "
1879
+ "constructor instead."
1880
+ )
1881
+ if has_fill_value and str(result) != string:
1882
+ raise TypeError(msg)
1883
+ return result
1884
+ else:
1885
+ raise TypeError(msg)
1886
+
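Making the "no non-default fill values in strings" rule concrete (a sketch, pandas 2.x):

    >>> from pandas import SparseDtype
    >>> SparseDtype.construct_from_string("Sparse[int64]")
    Sparse[int64, 0]
    >>> SparseDtype.construct_from_string("Sparse[int64, 1]")   # raises TypeError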
1887
+ @staticmethod
1888
+ def _parse_subtype(dtype: str) -> tuple[str, bool]:
1889
+ """
1890
+ Parse a string to get the subtype
1891
+
1892
+ Parameters
1893
+ ----------
1894
+ dtype : str
1895
+ A string like
1896
+
1897
+ * Sparse[subtype]
1898
+ * Sparse[subtype, fill_value]
1899
+
1900
+ Returns
1901
+ -------
1902
+ subtype : str
1903
+
1904
+ Raises
1905
+ ------
1906
+ ValueError
1907
+ When the subtype cannot be extracted.
1908
+ """
1909
+ xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
1910
+ m = xpr.match(dtype)
1911
+ has_fill_value = False
1912
+ if m:
1913
+ subtype = m.groupdict()["subtype"]
1914
+ has_fill_value = bool(m.groupdict()["fill_value"])
1915
+ elif dtype == "Sparse":
1916
+ subtype = "float64"
1917
+ else:
1918
+ raise ValueError(f"Cannot parse {dtype}")
1919
+ return subtype, has_fill_value
1920
+
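The regex above only splits the string; validation happens back in construct_from_string. A sketch of both branches:

    >>> from pandas import SparseDtype
    >>> SparseDtype._parse_subtype("Sparse[float64, nan]")
    ('float64', True)
    >>> SparseDtype._parse_subtype("Sparse")
    ('float64', False)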
1921
+ @classmethod
1922
+ def is_dtype(cls, dtype: object) -> bool:
1923
+ dtype = getattr(dtype, "dtype", dtype)
1924
+ if isinstance(dtype, str) and dtype.startswith("Sparse"):
1925
+ sub_type, _ = cls._parse_subtype(dtype)
1926
+ dtype = np.dtype(sub_type)
1927
+ elif isinstance(dtype, cls):
1928
+ return True
1929
+ return isinstance(dtype, np.dtype) or dtype == "Sparse"
1930
+
1931
+ def update_dtype(self, dtype) -> SparseDtype:
1932
+ """
1933
+ Convert the SparseDtype to a new dtype.
1934
+
1935
+ This takes care of converting the ``fill_value``.
1936
+
1937
+ Parameters
1938
+ ----------
1939
+ dtype : Union[str, numpy.dtype, SparseDtype]
1940
+ The new dtype to use.
1941
+
1942
+ * For a SparseDtype, it is simply returned
1943
+ * For a NumPy dtype (or str), the current fill value
1944
+ is converted to the new dtype, and a SparseDtype
1945
+ with `dtype` and the new fill value is returned.
1946
+
1947
+ Returns
1948
+ -------
1949
+ SparseDtype
1950
+ A new SparseDtype with the correct `dtype` and fill value
1951
+ for that `dtype`.
1952
+
1953
+ Raises
1954
+ ------
1955
+ ValueError
1956
+ When the current fill value cannot be converted to the
1957
+ new `dtype` (e.g. trying to convert ``np.nan`` to an
1958
+ integer dtype).
1959
+
1960
+
1961
+ Examples
1962
+ --------
1963
+ >>> SparseDtype(int, 0).update_dtype(float)
1964
+ Sparse[float64, 0.0]
1965
+
1966
+ >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
1967
+ Sparse[float64, nan]
1968
+ """
1969
+ from pandas.core.dtypes.astype import astype_array
1970
+ from pandas.core.dtypes.common import pandas_dtype
1971
+
1972
+ cls = type(self)
1973
+ dtype = pandas_dtype(dtype)
1974
+
1975
+ if not isinstance(dtype, cls):
1976
+ if not isinstance(dtype, np.dtype):
1977
+ raise TypeError("sparse arrays of extension dtypes not supported")
1978
+
1979
+ fv_asarray = np.atleast_1d(np.array(self.fill_value))
1980
+ fvarr = astype_array(fv_asarray, dtype)
1981
+ # NB: not fv_0d.item(), as that casts dt64->int
1982
+ fill_value = fvarr[0]
1983
+ dtype = cls(dtype, fill_value=fill_value)
1984
+
1985
+ return dtype
1986
+
1987
+ @property
1988
+ def _subtype_with_str(self):
1989
+ """
1990
+ Whether the SparseDtype's subtype should be considered ``str``.
1991
+
1992
+ Typically, pandas will store string data in an object-dtype array.
1993
+ When converting values to a dtype, e.g. in ``.astype``, we need to
+ be more specific: we need the actual underlying type.
+
+ Examples
+ --------
1998
+ >>> SparseDtype(int, 1)._subtype_with_str
1999
+ dtype('int64')
2000
+
2001
+ >>> SparseDtype(object, 1)._subtype_with_str
2002
+ dtype('O')
2003
+
2004
+ >>> dtype = SparseDtype(str, '')
2005
+ >>> dtype.subtype
2006
+ dtype('O')
2007
+
2008
+ >>> dtype._subtype_with_str
2009
+ <class 'str'>
2010
+ """
2011
+ if isinstance(self.fill_value, str):
2012
+ return type(self.fill_value)
2013
+ return self.subtype
2014
+
2015
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
2016
+ # TODO for now only handle SparseDtypes and numpy dtypes => extend
2017
+ # with other compatible extension dtypes
2018
+ from pandas.core.dtypes.cast import np_find_common_type
2019
+
2020
+ if any(
2021
+ isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
2022
+ for x in dtypes
2023
+ ):
2024
+ return None
2025
+
2026
+ fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
2027
+ fill_value = fill_values[0]
2028
+
2029
+ from pandas import isna
2030
+
2031
+ # np.nan isn't a singleton, so we may end up with multiple
2032
+ # NaNs here, so we ignore the all NA case too.
2033
+ if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
2034
+ warnings.warn(
2035
+ "Concatenating sparse arrays with multiple fill "
2036
+ f"values: '{fill_values}'. Picking the first and "
2037
+ "converting the rest.",
2038
+ PerformanceWarning,
2039
+ stacklevel=find_stack_level(),
2040
+ )
2041
+
2042
+ np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes)
2043
+ return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value)
2044
+
2045
+
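Mixing fill values is allowed but warns and keeps the first one, as described above (a sketch; the warning text may vary by version):

    >>> import pandas as pd
    >>> a = pd.Series(pd.arrays.SparseArray([0, 1], fill_value=0))
    >>> b = pd.Series(pd.arrays.SparseArray([1, 0], fill_value=1))
    >>> pd.concat([a, b]).dtype   # emits PerformanceWarning
    Sparse[int64, 0]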
2046
+ @register_extension_dtype
2047
+ class ArrowDtype(StorageExtensionDtype):
2048
+ """
2049
+ An ExtensionDtype for PyArrow data types.
2050
+
2051
+ .. warning::
2052
+
2053
+ ArrowDtype is considered experimental. The implementation and
2054
+ parts of the API may change without warning.
2055
+
2056
+ While most ``dtype`` arguments can accept the "string"
2057
+ constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful
2058
+ if the data type contains parameters like ``pyarrow.timestamp``.
2059
+
2060
+ Parameters
2061
+ ----------
2062
+ pyarrow_dtype : pa.DataType
2063
+ An instance of a `pyarrow.DataType <https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions>`__.
2064
+
2065
+ Attributes
2066
+ ----------
2067
+ pyarrow_dtype
2068
+
2069
+ Methods
2070
+ -------
2071
+ None
2072
+
2073
+ Returns
2074
+ -------
2075
+ ArrowDtype
2076
+
2077
+ Examples
2078
+ --------
2079
+ >>> import pyarrow as pa
2080
+ >>> pd.ArrowDtype(pa.int64())
2081
+ int64[pyarrow]
2082
+
2083
+ Types with parameters must be constructed with ArrowDtype.
2084
+
2085
+ >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))
2086
+ timestamp[s, tz=America/New_York][pyarrow]
2087
+ >>> pd.ArrowDtype(pa.list_(pa.int64()))
2088
+ list<item: int64>[pyarrow]
2089
+ """
2090
+
2091
+ _metadata = ("storage", "pyarrow_dtype") # type: ignore[assignment]
2092
+
2093
+ def __init__(self, pyarrow_dtype: pa.DataType) -> None:
2094
+ super().__init__("pyarrow")
2095
+ if pa_version_under10p1:
2096
+ raise ImportError("pyarrow>=10.0.1 is required for ArrowDtype")
2097
+ if not isinstance(pyarrow_dtype, pa.DataType):
2098
+ raise ValueError(
2099
+ f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "
2100
+ f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead."
2101
+ )
2102
+ self.pyarrow_dtype = pyarrow_dtype
2103
+
2104
+ def __repr__(self) -> str:
2105
+ return self.name
2106
+
2107
+ def __hash__(self) -> int:
2108
+ # make myself hashable
2109
+ return hash(str(self))
2110
+
2111
+ def __eq__(self, other: object) -> bool:
2112
+ if not isinstance(other, type(self)):
2113
+ return super().__eq__(other)
2114
+ return self.pyarrow_dtype == other.pyarrow_dtype
2115
+
2116
+ @property
2117
+ def type(self):
2118
+ """
2119
+ Returns associated scalar type.
2120
+ """
2121
+ pa_type = self.pyarrow_dtype
2122
+ if pa.types.is_integer(pa_type):
2123
+ return int
2124
+ elif pa.types.is_floating(pa_type):
2125
+ return float
2126
+ elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type):
2127
+ return str
2128
+ elif (
2129
+ pa.types.is_binary(pa_type)
2130
+ or pa.types.is_fixed_size_binary(pa_type)
2131
+ or pa.types.is_large_binary(pa_type)
2132
+ ):
2133
+ return bytes
2134
+ elif pa.types.is_boolean(pa_type):
2135
+ return bool
2136
+ elif pa.types.is_duration(pa_type):
2137
+ if pa_type.unit == "ns":
2138
+ return Timedelta
2139
+ else:
2140
+ return timedelta
2141
+ elif pa.types.is_timestamp(pa_type):
2142
+ if pa_type.unit == "ns":
2143
+ return Timestamp
2144
+ else:
2145
+ return datetime
2146
+ elif pa.types.is_date(pa_type):
2147
+ return date
2148
+ elif pa.types.is_time(pa_type):
2149
+ return time
2150
+ elif pa.types.is_decimal(pa_type):
2151
+ return Decimal
2152
+ elif pa.types.is_dictionary(pa_type):
2153
+ # TODO: Potentially change this & CategoricalDtype.type to
2154
+ # something more representative of the scalar
2155
+ return CategoricalDtypeType
2156
+ elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type):
2157
+ return list
2158
+ elif pa.types.is_fixed_size_list(pa_type):
2159
+ return list
2160
+ elif pa.types.is_map(pa_type):
2161
+ return list
2162
+ elif pa.types.is_struct(pa_type):
2163
+ return dict
2164
+ elif pa.types.is_null(pa_type):
2165
+ # TODO: None? pd.NA? pa.null?
2166
+ return type(pa_type)
2167
+ elif isinstance(pa_type, pa.ExtensionType):
2168
+ return type(self)(pa_type.storage_type).type
2169
+ raise NotImplementedError(pa_type)
2170
+
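Note the unit-dependent branches above: only nanosecond timestamps/durations map to the pandas scalars. A sketch (assumes pyarrow is installed):

    >>> import pyarrow as pa
    >>> import pandas as pd
    >>> pd.ArrowDtype(pa.timestamp("ns")).type
    <class 'pandas._libs.tslibs.timestamps.Timestamp'>
    >>> pd.ArrowDtype(pa.timestamp("s")).type
    <class 'datetime.datetime'>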
2171
+ @property
2172
+ def name(self) -> str: # type: ignore[override]
2173
+ """
2174
+ A string identifying the data type.
2175
+ """
2176
+ return f"{str(self.pyarrow_dtype)}[{self.storage}]"
2177
+
2178
+ @cache_readonly
2179
+ def numpy_dtype(self) -> np.dtype:
2180
+ """Return an instance of the related numpy dtype"""
2181
+ if pa.types.is_timestamp(self.pyarrow_dtype):
2182
+ # pa.timestamp(unit).to_pandas_dtype() returns ns units
2183
+ # regardless of the pyarrow timestamp units.
2184
+ # This can be removed if/when pyarrow addresses it:
2185
+ # https://github.com/apache/arrow/issues/34462
2186
+ return np.dtype(f"datetime64[{self.pyarrow_dtype.unit}]")
2187
+ if pa.types.is_duration(self.pyarrow_dtype):
2188
+ # pa.duration(unit).to_pandas_dtype() returns ns units
2189
+ # regardless of the pyarrow duration units
2190
+ # This can be removed if/when pyarrow addresses it:
2191
+ # https://github.com/apache/arrow/issues/34462
2192
+ return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]")
2193
+ if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(
2194
+ self.pyarrow_dtype
2195
+ ):
2196
+ # pa.string().to_pandas_dtype() = object which we don't want
2197
+ return np.dtype(str)
2198
+ try:
2199
+ return np.dtype(self.pyarrow_dtype.to_pandas_dtype())
2200
+ except (NotImplementedError, TypeError):
2201
+ return np.dtype(object)
2202
+
2203
+ @cache_readonly
2204
+ def kind(self) -> str:
2205
+ if pa.types.is_timestamp(self.pyarrow_dtype):
2206
+ # To mirror DatetimeTZDtype
2207
+ return "M"
2208
+ return self.numpy_dtype.kind
2209
+
2210
+ @cache_readonly
2211
+ def itemsize(self) -> int:
2212
+ """Return the number of bytes in this dtype"""
2213
+ return self.numpy_dtype.itemsize
2214
+
2215
+ @classmethod
2216
+ def construct_array_type(cls) -> type_t[ArrowExtensionArray]:
2217
+ """
2218
+ Return the array type associated with this dtype.
2219
+
2220
+ Returns
2221
+ -------
2222
+ type
2223
+ """
2224
+ from pandas.core.arrays.arrow import ArrowExtensionArray
2225
+
2226
+ return ArrowExtensionArray
2227
+
2228
+ @classmethod
2229
+ def construct_from_string(cls, string: str) -> ArrowDtype:
2230
+ """
2231
+ Construct this type from a string.
2232
+
2233
+ Parameters
2234
+ ----------
2235
+ string : str
2236
+ string should follow the format f"{pyarrow_type}[pyarrow]"
2237
+ e.g. int64[pyarrow]
2238
+ """
2239
+ if not isinstance(string, str):
2240
+ raise TypeError(
2241
+ f"'construct_from_string' expects a string, got {type(string)}"
2242
+ )
2243
+ if not string.endswith("[pyarrow]"):
2244
+ raise TypeError(f"'{string}' must end with '[pyarrow]'")
2245
+ if string == "string[pyarrow]":
2246
+ # Ensure Registry.find skips ArrowDtype to use StringDtype instead
2247
+ raise TypeError("string[pyarrow] should be constructed by StringDtype")
2248
+
2249
+ base_type = string[:-9] # get rid of "[pyarrow]"
2250
+ try:
2251
+ pa_dtype = pa.type_for_alias(base_type)
2252
+ except ValueError as err:
2253
+ has_parameters = re.search(r"[\[\(].*[\]\)]", base_type)
2254
+ if has_parameters:
2255
+ # Fallback to try common temporal types
2256
+ try:
2257
+ return cls._parse_temporal_dtype_string(base_type)
2258
+ except (NotImplementedError, ValueError):
2259
+ # Fall through to raise with nice exception message below
2260
+ pass
2261
+
2262
+ raise NotImplementedError(
2263
+ "Passing pyarrow type specific parameters "
2264
+ f"({has_parameters.group()}) in the string is not supported. "
2265
+ "Please construct an ArrowDtype object with a pyarrow_dtype "
2266
+ "instance with specific parameters."
2267
+ ) from err
2268
+ raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err
2269
+ return cls(pa_dtype)
2270
+
2271
+ # TODO(arrow#33642): This can be removed once supported by pyarrow
2272
+ @classmethod
2273
+ def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype:
2274
+ """
2275
+ Construct a temporal ArrowDtype from string.
2276
+ """
2277
+ # we assume
2278
+ # 1) "[pyarrow]" has already been stripped from the end of our string.
2279
+ # 2) we know "[" is present
2280
+ head, tail = string.split("[", 1)
2281
+
2282
+ if not tail.endswith("]"):
2283
+ raise ValueError
2284
+ tail = tail[:-1]
2285
+
2286
+ if head == "timestamp":
2287
+ assert "," in tail # otherwise type_for_alias should work
2288
+ unit, tz = tail.split(",", 1)
2289
+ unit = unit.strip()
2290
+ tz = tz.strip()
2291
+ if tz.startswith("tz="):
2292
+ tz = tz[3:]
2293
+
2294
+ pa_type = pa.timestamp(unit, tz=tz)
2295
+ dtype = cls(pa_type)
2296
+ return dtype
2297
+
2298
+ raise NotImplementedError(string)
2299
+
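The alias lookup and the temporal fallback above, taken together (illustrative, pandas 2.x with pyarrow):

    >>> import pandas as pd
    >>> pd.ArrowDtype.construct_from_string("int64[pyarrow]")
    int64[pyarrow]
    >>> pd.ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]")
    timestamp[s, tz=UTC][pyarrow]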
2300
+ @property
2301
+ def _is_numeric(self) -> bool:
2302
+ """
2303
+ Whether columns with this dtype should be considered numeric.
2304
+ """
2305
+ # TODO: pa.types.is_boolean?
2306
+ return (
2307
+ pa.types.is_integer(self.pyarrow_dtype)
2308
+ or pa.types.is_floating(self.pyarrow_dtype)
2309
+ or pa.types.is_decimal(self.pyarrow_dtype)
2310
+ )
2311
+
2312
+ @property
2313
+ def _is_boolean(self) -> bool:
2314
+ """
2315
+ Whether this dtype should be considered boolean.
2316
+ """
2317
+ return pa.types.is_boolean(self.pyarrow_dtype)
2318
+
2319
+ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
2320
+ # We unwrap any masked dtypes, find the common dtype we would use
2321
+ # for that, then re-mask the result.
2322
+ # Mirrors BaseMaskedDtype
2323
+ from pandas.core.dtypes.cast import find_common_type
2324
+
2325
+ null_dtype = type(self)(pa.null())
2326
+
2327
+ new_dtype = find_common_type(
2328
+ [
2329
+ dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype
2330
+ for dtype in dtypes
2331
+ if dtype != null_dtype
2332
+ ]
2333
+ )
2334
+ if not isinstance(new_dtype, np.dtype):
2335
+ return None
2336
+ try:
2337
+ pa_dtype = pa.from_numpy_dtype(new_dtype)
2338
+ return type(self)(pa_dtype)
2339
+ except NotImplementedError:
2340
+ return None
2341
+
2342
+ def __from_arrow__(self, array: pa.Array | pa.ChunkedArray):
2343
+ """
2344
+ Construct an ArrowExtensionArray from a pyarrow Array/ChunkedArray.
2345
+ """
2346
+ array_class = self.construct_array_type()
2347
+ arr = array.cast(self.pyarrow_dtype, safe=True)
2348
+ return array_class(arr)
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/generic.py ADDED
@@ -0,0 +1,147 @@
1
+ """ define generic base classes for pandas objects """
2
+ from __future__ import annotations
3
+
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Type,
7
+ cast,
8
+ )
9
+
10
+ if TYPE_CHECKING:
11
+ from pandas import (
12
+ Categorical,
13
+ CategoricalIndex,
14
+ DataFrame,
15
+ DatetimeIndex,
16
+ Index,
17
+ IntervalIndex,
18
+ MultiIndex,
19
+ PeriodIndex,
20
+ RangeIndex,
21
+ Series,
22
+ TimedeltaIndex,
23
+ )
24
+ from pandas.core.arrays import (
25
+ DatetimeArray,
26
+ ExtensionArray,
27
+ NumpyExtensionArray,
28
+ PeriodArray,
29
+ TimedeltaArray,
30
+ )
31
+ from pandas.core.generic import NDFrame
32
+
33
+
34
+ # define abstract base classes to enable isinstance type checking on our
35
+ # objects
36
+ def create_pandas_abc_type(name, attr, comp):
37
+ def _check(inst) -> bool:
38
+ return getattr(inst, attr, "_typ") in comp
39
+
40
+ # https://github.com/python/mypy/issues/1006
41
+ # error: 'classmethod' used with a non-method
42
+ @classmethod # type: ignore[misc]
43
+ def _instancecheck(cls, inst) -> bool:
44
+ return _check(inst) and not isinstance(inst, type)
45
+
46
+ @classmethod # type: ignore[misc]
47
+ def _subclasscheck(cls, inst) -> bool:
48
+ # Raise instead of returning False
49
+ # This is consistent with default __subclasscheck__ behavior
50
+ if not isinstance(inst, type):
51
+ raise TypeError("issubclass() arg 1 must be a class")
52
+
53
+ return _check(inst)
54
+
55
+ dct = {"__instancecheck__": _instancecheck, "__subclasscheck__": _subclasscheck}
56
+ meta = type("ABCBase", (type,), dct)
57
+ return meta(name, (), dct)
58
+
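These synthetic ABCs match on a `_typ` tag instead of real inheritance, which avoids importing the heavy classes at module load; instances match but the classes themselves do not. A sketch:

    >>> import pandas as pd
    >>> from pandas.core.dtypes.generic import ABCSeries
    >>> isinstance(pd.Series([1]), ABCSeries)
    True
    >>> isinstance(pd.Series, ABCSeries)   # the class itself is excluded
    False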
59
+
60
+ ABCRangeIndex = cast(
61
+ "Type[RangeIndex]",
62
+ create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",)),
63
+ )
64
+ ABCMultiIndex = cast(
65
+ "Type[MultiIndex]",
66
+ create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)),
67
+ )
68
+ ABCDatetimeIndex = cast(
69
+ "Type[DatetimeIndex]",
70
+ create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)),
71
+ )
72
+ ABCTimedeltaIndex = cast(
73
+ "Type[TimedeltaIndex]",
74
+ create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)),
75
+ )
76
+ ABCPeriodIndex = cast(
77
+ "Type[PeriodIndex]",
78
+ create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)),
79
+ )
80
+ ABCCategoricalIndex = cast(
81
+ "Type[CategoricalIndex]",
82
+ create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)),
83
+ )
84
+ ABCIntervalIndex = cast(
85
+ "Type[IntervalIndex]",
86
+ create_pandas_abc_type("ABCIntervalIndex", "_typ", ("intervalindex",)),
87
+ )
88
+ ABCIndex = cast(
89
+ "Type[Index]",
90
+ create_pandas_abc_type(
91
+ "ABCIndex",
92
+ "_typ",
93
+ {
94
+ "index",
95
+ "rangeindex",
96
+ "multiindex",
97
+ "datetimeindex",
98
+ "timedeltaindex",
99
+ "periodindex",
100
+ "categoricalindex",
101
+ "intervalindex",
102
+ },
103
+ ),
104
+ )
105
+
106
+
107
+ ABCNDFrame = cast(
108
+ "Type[NDFrame]",
109
+ create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")),
110
+ )
111
+ ABCSeries = cast(
112
+ "Type[Series]",
113
+ create_pandas_abc_type("ABCSeries", "_typ", ("series",)),
114
+ )
115
+ ABCDataFrame = cast(
116
+ "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
117
+ )
118
+
119
+ ABCCategorical = cast(
120
+ "Type[Categorical]",
121
+ create_pandas_abc_type("ABCCategorical", "_typ", ("categorical")),
122
+ )
123
+ ABCDatetimeArray = cast(
124
+ "Type[DatetimeArray]",
125
+ create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray")),
126
+ )
127
+ ABCTimedeltaArray = cast(
128
+ "Type[TimedeltaArray]",
129
+ create_pandas_abc_type("ABCTimedeltaArray", "_typ", ("timedeltaarray")),
130
+ )
131
+ ABCPeriodArray = cast(
132
+ "Type[PeriodArray]",
133
+ create_pandas_abc_type("ABCPeriodArray", "_typ", ("periodarray",)),
134
+ )
135
+ ABCExtensionArray = cast(
136
+ "Type[ExtensionArray]",
137
+ create_pandas_abc_type(
138
+ "ABCExtensionArray",
139
+ "_typ",
140
+ # Note: IntervalArray and SparseArray are included bc they have _typ="extension"
141
+ {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"},
142
+ ),
143
+ )
144
+ ABCNumpyExtensionArray = cast(
145
+ "Type[NumpyExtensionArray]",
146
+ create_pandas_abc_type("ABCNumpyExtensionArray", "_typ", ("npy_extension",)),
147
+ )
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/inference.py ADDED
@@ -0,0 +1,437 @@
1
+ """ basic inference routines """
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections import abc
6
+ from numbers import Number
7
+ import re
8
+ from re import Pattern
9
+ from typing import TYPE_CHECKING
10
+
11
+ import numpy as np
12
+
13
+ from pandas._libs import lib
14
+
15
+ if TYPE_CHECKING:
16
+ from collections.abc import Hashable
17
+
18
+ from pandas._typing import TypeGuard
19
+
20
+ is_bool = lib.is_bool
21
+
22
+ is_integer = lib.is_integer
23
+
24
+ is_float = lib.is_float
25
+
26
+ is_complex = lib.is_complex
27
+
28
+ is_scalar = lib.is_scalar
29
+
30
+ is_decimal = lib.is_decimal
31
+
32
+ is_interval = lib.is_interval
33
+
34
+ is_list_like = lib.is_list_like
35
+
36
+ is_iterator = lib.is_iterator
37
+
38
+
39
+ def is_number(obj) -> TypeGuard[Number | np.number]:
40
+ """
41
+ Check if the object is a number.
42
+
43
+ Returns True when the object is a number, and False if is not.
44
+
45
+ Parameters
46
+ ----------
47
+ obj : any type
48
+ The object to check if is a number.
49
+
50
+ Returns
51
+ -------
52
+ bool
53
+ Whether `obj` is a number or not.
54
+
55
+ See Also
56
+ --------
57
+ api.types.is_integer: Checks a subgroup of numbers.
58
+
59
+ Examples
60
+ --------
61
+ >>> from pandas.api.types import is_number
62
+ >>> is_number(1)
63
+ True
64
+ >>> is_number(7.15)
65
+ True
66
+
67
+ Booleans are valid because they are int subclass.
68
+
69
+ >>> is_number(False)
70
+ True
71
+
72
+ >>> is_number("foo")
73
+ False
74
+ >>> is_number("5")
75
+ False
76
+ """
77
+ return isinstance(obj, (Number, np.number))
78
+
79
+
80
+ def iterable_not_string(obj) -> bool:
81
+ """
82
+ Check if the object is an iterable but not a string.
83
+
84
+ Parameters
85
+ ----------
86
+ obj : The object to check.
87
+
88
+ Returns
89
+ -------
90
+ is_iter_not_string : bool
91
+ Whether `obj` is a non-string iterable.
92
+
93
+ Examples
94
+ --------
95
+ >>> iterable_not_string([1, 2, 3])
96
+ True
97
+ >>> iterable_not_string("foo")
98
+ False
99
+ >>> iterable_not_string(1)
100
+ False
101
+ """
102
+ return isinstance(obj, abc.Iterable) and not isinstance(obj, str)
103
+
104
+
105
+ def is_file_like(obj) -> bool:
106
+ """
107
+ Check if the object is a file-like object.
108
+
109
+ For objects to be considered file-like, they must
110
+ be an iterator AND have a `read` and/or `write`
111
+ method as an attribute.
112
+
113
+ Note: file-like objects must be iterable, but
114
+ iterable objects need not be file-like.
115
+
116
+ Parameters
117
+ ----------
118
+ obj : The object to check
119
+
120
+ Returns
121
+ -------
122
+ bool
123
+ Whether `obj` has file-like properties.
124
+
125
+ Examples
126
+ --------
127
+ >>> import io
128
+ >>> from pandas.api.types import is_file_like
129
+ >>> buffer = io.StringIO("data")
130
+ >>> is_file_like(buffer)
131
+ True
132
+ >>> is_file_like([1, 2, 3])
133
+ False
134
+ """
135
+ if not (hasattr(obj, "read") or hasattr(obj, "write")):
136
+ return False
137
+
138
+ return bool(hasattr(obj, "__iter__"))
139
+
140
+
141
+ def is_re(obj) -> TypeGuard[Pattern]:
142
+ """
143
+ Check if the object is a regex pattern instance.
144
+
145
+ Parameters
146
+ ----------
147
+ obj : The object to check
148
+
149
+ Returns
150
+ -------
151
+ bool
152
+ Whether `obj` is a regex pattern.
153
+
154
+ Examples
155
+ --------
156
+ >>> from pandas.api.types import is_re
157
+ >>> import re
158
+ >>> is_re(re.compile(".*"))
159
+ True
160
+ >>> is_re("foo")
161
+ False
162
+ """
163
+ return isinstance(obj, Pattern)
164
+
165
+
166
+ def is_re_compilable(obj) -> bool:
167
+ """
168
+ Check if the object can be compiled into a regex pattern instance.
169
+
170
+ Parameters
171
+ ----------
172
+ obj : The object to check
173
+
174
+ Returns
175
+ -------
176
+ bool
177
+ Whether `obj` can be compiled as a regex pattern.
178
+
179
+ Examples
180
+ --------
181
+ >>> from pandas.api.types import is_re_compilable
182
+ >>> is_re_compilable(".*")
183
+ True
184
+ >>> is_re_compilable(1)
185
+ False
186
+ """
187
+ try:
188
+ re.compile(obj)
189
+ except TypeError:
190
+ return False
191
+ else:
192
+ return True
193
+
194
+
195
+ def is_array_like(obj) -> bool:
196
+ """
197
+ Check if the object is array-like.
198
+
199
+ For an object to be considered array-like, it must be list-like and
200
+ have a `dtype` attribute.
201
+
202
+ Parameters
203
+ ----------
204
+ obj : The object to check
205
+
206
+ Returns
207
+ -------
208
+ is_array_like : bool
209
+ Whether `obj` has array-like properties.
210
+
211
+ Examples
212
+ --------
213
+ >>> is_array_like(np.array([1, 2, 3]))
214
+ True
215
+ >>> is_array_like(pd.Series(["a", "b"]))
216
+ True
217
+ >>> is_array_like(pd.Index(["2016-01-01"]))
218
+ True
219
+ >>> is_array_like([1, 2, 3])
220
+ False
221
+ >>> is_array_like(("a", "b"))
222
+ False
223
+ """
224
+ return is_list_like(obj) and hasattr(obj, "dtype")
225
+
226
+
227
+ def is_nested_list_like(obj) -> bool:
228
+ """
229
+ Check if the object is list-like, and that all of its elements
230
+ are also list-like.
231
+
232
+ Parameters
233
+ ----------
234
+ obj : The object to check
235
+
236
+ Returns
237
+ -------
238
+ is_list_like : bool
239
+ Whether `obj` has list-like properties.
240
+
241
+ Examples
242
+ --------
243
+ >>> is_nested_list_like([[1, 2, 3]])
244
+ True
245
+ >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
246
+ True
247
+ >>> is_nested_list_like(["foo"])
248
+ False
249
+ >>> is_nested_list_like([])
250
+ False
251
+ >>> is_nested_list_like([[1, 2, 3], 1])
252
+ False
253
+
254
+ Notes
255
+ -----
256
+ This won't reliably detect whether a consumable iterator (e.g.
257
+ a generator) is a nested-list-like without consuming the iterator.
258
+ To avoid consuming it, we always return False if the outer container
259
+ doesn't define `__len__`.
260
+
261
+ See Also
262
+ --------
263
+ is_list_like
264
+ """
265
+ return (
266
+ is_list_like(obj)
267
+ and hasattr(obj, "__len__")
268
+ and len(obj) > 0
269
+ and all(is_list_like(item) for item in obj)
270
+ )
271
+
272
+
273
+ def is_dict_like(obj) -> bool:
274
+ """
275
+ Check if the object is dict-like.
276
+
277
+ Parameters
278
+ ----------
279
+ obj : The object to check
280
+
281
+ Returns
282
+ -------
283
+ bool
284
+ Whether `obj` has dict-like properties.
285
+
286
+ Examples
287
+ --------
288
+ >>> from pandas.api.types import is_dict_like
289
+ >>> is_dict_like({1: 2})
290
+ True
291
+ >>> is_dict_like([1, 2, 3])
292
+ False
293
+ >>> is_dict_like(dict)
294
+ False
295
+ >>> is_dict_like(dict())
296
+ True
297
+ """
298
+ dict_like_attrs = ("__getitem__", "keys", "__contains__")
299
+ return (
300
+ all(hasattr(obj, attr) for attr in dict_like_attrs)
301
+ # [GH 25196] exclude classes
302
+ and not isinstance(obj, type)
303
+ )
304
+
305
+
306
+ def is_named_tuple(obj) -> bool:
307
+ """
308
+ Check if the object is a named tuple.
309
+
310
+ Parameters
311
+ ----------
312
+ obj : The object to check
313
+
314
+ Returns
315
+ -------
316
+ bool
317
+ Whether `obj` is a named tuple.
318
+
319
+ Examples
320
+ --------
321
+ >>> from collections import namedtuple
322
+ >>> from pandas.api.types import is_named_tuple
323
+ >>> Point = namedtuple("Point", ["x", "y"])
324
+ >>> p = Point(1, 2)
325
+ >>>
326
+ >>> is_named_tuple(p)
327
+ True
328
+ >>> is_named_tuple((1, 2))
329
+ False
330
+ """
331
+ return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields")
332
+
333
+
334
+ def is_hashable(obj) -> TypeGuard[Hashable]:
335
+ """
336
+ Return True if hash(obj) will succeed, False otherwise.
337
+
338
+ Some types will pass a test against collections.abc.Hashable but fail when
339
+ they are actually hashed with hash().
340
+
341
+ Distinguish between these and other types by trying the call to hash() and
342
+ seeing if they raise TypeError.
343
+
344
+ Returns
345
+ -------
346
+ bool
347
+
348
+ Examples
349
+ --------
350
+ >>> import collections
351
+ >>> from pandas.api.types import is_hashable
352
+ >>> a = ([],)
353
+ >>> isinstance(a, collections.abc.Hashable)
354
+ True
355
+ >>> is_hashable(a)
356
+ False
357
+ """
358
+ # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable),
359
+ # which can be faster than calling hash. That is because numpy scalars
360
+ # fail this test.
361
+
362
+ # Reconsider this decision once this numpy bug is fixed:
363
+ # https://github.com/numpy/numpy/issues/5562
364
+
365
+ try:
366
+ hash(obj)
367
+ except TypeError:
368
+ return False
369
+ else:
370
+ return True
371
+
372
+
373
+ def is_sequence(obj) -> bool:
374
+ """
375
+ Check if the object is a sequence of objects.
376
+ String types are not included as sequences here.
377
+
378
+ Parameters
379
+ ----------
380
+ obj : The object to check
381
+
382
+ Returns
383
+ -------
384
+ is_sequence : bool
385
+ Whether `obj` is a sequence of objects.
386
+
387
+ Examples
388
+ --------
389
+ >>> l = [1, 2, 3]
390
+ >>>
391
+ >>> is_sequence(l)
392
+ True
393
+ >>> is_sequence(iter(l))
394
+ False
395
+ """
396
+ try:
397
+ iter(obj) # Can iterate over it.
398
+ len(obj) # Has a length associated with it.
399
+ return not isinstance(obj, (str, bytes))
400
+ except (TypeError, AttributeError):
401
+ return False
402
+
403
+
404
+ def is_dataclass(item) -> bool:
405
+ """
406
+ Check if the object is a data-class instance.
407
+
408
+ Parameters
409
+ ----------
410
+ item : object
411
+
412
+ Returns
413
+ --------
414
+ is_dataclass : bool
415
+ True if the item is an instance of a data-class,
416
+ will return false if you pass the data class itself
417
+
418
+ Examples
419
+ --------
420
+ >>> from dataclasses import dataclass
421
+ >>> @dataclass
422
+ ... class Point:
423
+ ... x: int
424
+ ... y: int
425
+
426
+ >>> is_dataclass(Point)
427
+ False
428
+ >>> is_dataclass(Point(0,2))
429
+ True
430
+
431
+ """
432
+ try:
433
+ import dataclasses
434
+
435
+ return dataclasses.is_dataclass(item) and not isinstance(item, type)
436
+ except ImportError:
437
+ return False
env-llmeval/lib/python3.10/site-packages/pandas/core/dtypes/missing.py ADDED
@@ -0,0 +1,810 @@
1
+ """
2
+ missing types & inference
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from decimal import Decimal
7
+ from functools import partial
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ overload,
11
+ )
12
+ import warnings
13
+
14
+ import numpy as np
15
+
16
+ from pandas._config import get_option
17
+
18
+ from pandas._libs import lib
19
+ import pandas._libs.missing as libmissing
20
+ from pandas._libs.tslibs import (
21
+ NaT,
22
+ iNaT,
23
+ )
24
+
25
+ from pandas.core.dtypes.common import (
26
+ DT64NS_DTYPE,
27
+ TD64NS_DTYPE,
28
+ ensure_object,
29
+ is_scalar,
30
+ is_string_or_object_np_dtype,
31
+ )
32
+ from pandas.core.dtypes.dtypes import (
33
+ CategoricalDtype,
34
+ DatetimeTZDtype,
35
+ ExtensionDtype,
36
+ IntervalDtype,
37
+ PeriodDtype,
38
+ )
39
+ from pandas.core.dtypes.generic import (
40
+ ABCDataFrame,
41
+ ABCExtensionArray,
42
+ ABCIndex,
43
+ ABCMultiIndex,
44
+ ABCSeries,
45
+ )
46
+ from pandas.core.dtypes.inference import is_list_like
47
+
48
+ if TYPE_CHECKING:
49
+ from re import Pattern
50
+
51
+ from pandas._typing import (
52
+ ArrayLike,
53
+ DtypeObj,
54
+ NDFrame,
55
+ NDFrameT,
56
+ Scalar,
57
+ npt,
58
+ )
59
+
60
+ from pandas import Series
61
+ from pandas.core.indexes.base import Index
62
+
63
+
64
+ isposinf_scalar = libmissing.isposinf_scalar
65
+ isneginf_scalar = libmissing.isneginf_scalar
66
+
67
+ nan_checker = np.isnan
68
+ INF_AS_NA = False
69
+ _dtype_object = np.dtype("object")
70
+ _dtype_str = np.dtype(str)
71
+
72
+
73
+ @overload
74
+ def isna(obj: Scalar | Pattern) -> bool:
75
+ ...
76
+
77
+
78
+ @overload
79
+ def isna(
80
+ obj: ArrayLike | Index | list,
81
+ ) -> npt.NDArray[np.bool_]:
82
+ ...
83
+
84
+
85
+ @overload
86
+ def isna(obj: NDFrameT) -> NDFrameT:
87
+ ...
88
+
89
+
90
+ # handle unions
91
+ @overload
92
+ def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
93
+ ...
94
+
95
+
96
+ @overload
97
+ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
98
+ ...
99
+
100
+
101
+ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
102
+ """
103
+ Detect missing values for an array-like object.
104
+
105
+ This function takes a scalar or array-like object and indicates
106
+ whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
107
+ in object arrays, ``NaT`` in datetimelike).
108
+
109
+ Parameters
110
+ ----------
111
+ obj : scalar or array-like
112
+ Object to check for null or missing values.
113
+
114
+ Returns
115
+ -------
116
+ bool or array-like of bool
117
+ For scalar input, returns a scalar boolean.
118
+ For array input, returns an array of boolean indicating whether each
119
+ corresponding element is missing.
120
+
121
+ See Also
122
+ --------
123
+ notna : Boolean inverse of pandas.isna.
124
+ Series.isna : Detect missing values in a Series.
125
+ DataFrame.isna : Detect missing values in a DataFrame.
126
+ Index.isna : Detect missing values in an Index.
127
+
128
+ Examples
129
+ --------
130
+ Scalar arguments (including strings) result in a scalar boolean.
131
+
132
+ >>> pd.isna('dog')
133
+ False
134
+
135
+ >>> pd.isna(pd.NA)
136
+ True
137
+
138
+ >>> pd.isna(np.nan)
139
+ True
140
+
141
+ ndarrays result in an ndarray of booleans.
142
+
143
+ >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
144
+ >>> array
145
+ array([[ 1., nan, 3.],
146
+ [ 4., 5., nan]])
147
+ >>> pd.isna(array)
148
+ array([[False, True, False],
149
+ [False, False, True]])
150
+
151
+ For indexes, an ndarray of booleans is returned.
152
+
153
+ >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
154
+ ... "2017-07-08"])
155
+ >>> index
156
+ DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
157
+ dtype='datetime64[ns]', freq=None)
158
+ >>> pd.isna(index)
159
+ array([False, False, True, False])
160
+
161
+ For Series and DataFrame, the same type is returned, containing booleans.
162
+
163
+ >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
164
+ >>> df
165
+ 0 1 2
166
+ 0 ant bee cat
167
+ 1 dog None fly
168
+ >>> pd.isna(df)
169
+ 0 1 2
170
+ 0 False False False
171
+ 1 False True False
172
+
173
+ >>> pd.isna(df[1])
174
+ 0 False
175
+ 1 True
176
+ Name: 1, dtype: bool
177
+ """
178
+ return _isna(obj)
179
+
180
+
181
+ isnull = isna
182
+
183
+
184
+ def _isna(obj, inf_as_na: bool = False):
185
+ """
186
+ Detect missing values, treating None, NaN or NA as null. Infinite
187
+ values will also be treated as null if inf_as_na is True.
188
+
189
+ Parameters
190
+ ----------
191
+ obj: ndarray or object value
192
+ Input array or scalar value.
193
+ inf_as_na: bool
194
+ Whether to treat infinity as null.
195
+
196
+ Returns
197
+ -------
198
+ boolean ndarray or boolean
199
+ """
200
+ if is_scalar(obj):
201
+ return libmissing.checknull(obj, inf_as_na=inf_as_na)
202
+ elif isinstance(obj, ABCMultiIndex):
203
+ raise NotImplementedError("isna is not defined for MultiIndex")
204
+ elif isinstance(obj, type):
205
+ return False
206
+ elif isinstance(obj, (np.ndarray, ABCExtensionArray)):
207
+ return _isna_array(obj, inf_as_na=inf_as_na)
208
+ elif isinstance(obj, ABCIndex):
209
+ # Try to use cached isna, which also short-circuits for integer dtypes
210
+ # and avoids materializing RangeIndex._values
211
+ if not obj._can_hold_na:
212
+ return obj.isna()
213
+ return _isna_array(obj._values, inf_as_na=inf_as_na)
214
+
215
+ elif isinstance(obj, ABCSeries):
216
+ result = _isna_array(obj._values, inf_as_na=inf_as_na)
217
+ # box
218
+ result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
219
+ return result
220
+ elif isinstance(obj, ABCDataFrame):
221
+ return obj.isna()
222
+ elif isinstance(obj, list):
223
+ return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
224
+ elif hasattr(obj, "__array__"):
225
+ return _isna_array(np.asarray(obj), inf_as_na=inf_as_na)
226
+ else:
227
+ return False
228
+
229
+
230
+ def _use_inf_as_na(key) -> None:
231
+ """
232
+ Option change callback for na/inf behaviour.
233
+
234
+ Choose which replacement for numpy.isnan / -numpy.isfinite is used.
235
+
236
+ Parameters
237
+ ----------
238
+ flag: bool
239
+ True means treat None, NaN, INF, -INF as null (old way),
240
+ False means None and NaN are null, but INF, -INF are not null
241
+ (new way).
242
+
243
+ Notes
244
+ -----
245
+ This approach to setting global module values is discussed and
246
+ approved here:
247
+
248
+ * https://stackoverflow.com/questions/4859217/
249
+ programmatically-creating-variables-in-python/4859312#4859312
250
+ """
251
+ inf_as_na = get_option(key)
252
+ globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)
253
+ if inf_as_na:
254
+ globals()["nan_checker"] = lambda x: ~np.isfinite(x)
255
+ globals()["INF_AS_NA"] = True
256
+ else:
257
+ globals()["nan_checker"] = np.isnan
258
+ globals()["INF_AS_NA"] = False
259
+
260
+
261
+ def _isna_array(values: ArrayLike, inf_as_na: bool = False):
262
+ """
263
+ Return an array indicating which values of the input array are NaN / NA.
264
+
265
+ Parameters
266
+ ----------
267
+ obj: ndarray or ExtensionArray
268
+ The input array whose elements are to be checked.
269
+ inf_as_na: bool
270
+ Whether or not to treat infinite values as NA.
271
+
272
+ Returns
273
+ -------
274
+ array-like
275
+ Array of boolean values denoting the NA status of each element.
276
+ """
277
+ dtype = values.dtype
278
+
279
+ if not isinstance(values, np.ndarray):
280
+ # i.e. ExtensionArray
281
+ if inf_as_na and isinstance(dtype, CategoricalDtype):
282
+ result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na)
283
+ else:
284
+ # error: Incompatible types in assignment (expression has type
285
+ # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has
286
+ # type "ndarray[Any, dtype[bool_]]")
287
+ result = values.isna() # type: ignore[assignment]
288
+ elif isinstance(values, np.rec.recarray):
289
+ # GH 48526
290
+ result = _isna_recarray_dtype(values, inf_as_na=inf_as_na)
291
+ elif is_string_or_object_np_dtype(values.dtype):
292
+ result = _isna_string_dtype(values, inf_as_na=inf_as_na)
293
+ elif dtype.kind in "mM":
294
+ # this is the NaT pattern
295
+ result = values.view("i8") == iNaT
296
+ else:
297
+ if inf_as_na:
298
+ result = ~np.isfinite(values)
299
+ else:
300
+ result = np.isnan(values)
301
+
302
+ return result
303
+
304
+
305
+ def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]:
306
+ # Working around NumPy ticket 1542
307
+ dtype = values.dtype
308
+
309
+ if dtype.kind in ("S", "U"):
310
+ result = np.zeros(values.shape, dtype=bool)
311
+ else:
312
+ if values.ndim in {1, 2}:
313
+ result = libmissing.isnaobj(values, inf_as_na=inf_as_na)
314
+ else:
315
+ # 0-D, reached via e.g. mask_missing
316
+ result = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na)
317
+ result = result.reshape(values.shape)
318
+
319
+ return result
320
+
+
+ def _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_:
+     is_inf_in_record = np.zeros(len(record_as_array), dtype=bool)
+     for i, value in enumerate(record_as_array):
+         is_element_inf = False
+         try:
+             is_element_inf = np.isinf(value)
+         except TypeError:
+             is_element_inf = False
+         is_inf_in_record[i] = is_element_inf
+
+     return np.any(is_inf_in_record)
+
+
+ def _isna_recarray_dtype(
+     values: np.rec.recarray, inf_as_na: bool
+ ) -> npt.NDArray[np.bool_]:
+     result = np.zeros(values.shape, dtype=bool)
+     for i, record in enumerate(values):
+         record_as_array = np.array(record.tolist())
+         does_record_contain_nan = isna_all(record_as_array)
+         does_record_contain_inf = False
+         if inf_as_na:
+             does_record_contain_inf = bool(_has_record_inf_value(record_as_array))
+         result[i] = np.any(
+             np.logical_or(does_record_contain_nan, does_record_contain_inf)
+         )
+
+     return result
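Per the recarray handling above, a record counts as missing only when *all* of its fields are NA (and, with ``inf_as_na``, when any field is infinite). A sketch of the observable behaviour, assuming ``pd.isna`` routes recarrays through this helper (GH 48526):

import numpy as np
import pandas as pd

rec = np.rec.fromrecords(
    [(np.nan, np.nan), (1.0, np.nan)], names=["a", "b"]
)
print(pd.isna(rec))  # [ True False]: only the all-NaN record is flagged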
+
+
+ @overload
+ def notna(obj: Scalar) -> bool:
+     ...
+
+
+ @overload
+ def notna(
+     obj: ArrayLike | Index | list,
+ ) -> npt.NDArray[np.bool_]:
+     ...
+
+
+ @overload
+ def notna(obj: NDFrameT) -> NDFrameT:
+     ...
+
+
+ # handle unions
+ @overload
+ def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
+     ...
+
+
+ @overload
+ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
+     ...
+
+
+ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
+     """
+     Detect non-missing values for an array-like object.
+
+     This function takes a scalar or array-like object and indicates
+     whether values are valid (not missing, which is ``NaN`` in numeric
+     arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).
+
+     Parameters
+     ----------
+     obj : array-like or object value
+         Object to check for *not* null or *non*-missing values.
+
+     Returns
+     -------
+     bool or array-like of bool
+         For scalar input, returns a scalar boolean.
+         For array input, returns an array of boolean indicating whether each
+         corresponding element is valid.
+
+     See Also
+     --------
+     isna : Boolean inverse of pandas.notna.
+     Series.notna : Detect valid values in a Series.
+     DataFrame.notna : Detect valid values in a DataFrame.
+     Index.notna : Detect valid values in an Index.
+
+     Examples
+     --------
+     Scalar arguments (including strings) result in a scalar boolean.
+
+     >>> pd.notna('dog')
+     True
+
+     >>> pd.notna(pd.NA)
+     False
+
+     >>> pd.notna(np.nan)
+     False
+
+     ndarrays result in an ndarray of booleans.
+
+     >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
+     >>> array
+     array([[ 1., nan,  3.],
+            [ 4.,  5., nan]])
+     >>> pd.notna(array)
+     array([[ True, False,  True],
+            [ True,  True, False]])
+
+     For indexes, an ndarray of booleans is returned.
+
+     >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
+     ...                           "2017-07-08"])
+     >>> index
+     DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
+                   dtype='datetime64[ns]', freq=None)
+     >>> pd.notna(index)
+     array([ True,  True, False,  True])
+
+     For Series and DataFrame, the same type is returned, containing booleans.
+
+     >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
+     >>> df
+          0     1    2
+     0  ant   bee  cat
+     1  dog  None  fly
+     >>> pd.notna(df)
+           0      1     2
+     0  True   True  True
+     1  True  False  True
+
+     >>> pd.notna(df[1])
+     0     True
+     1    False
+     Name: 1, dtype: bool
+     """
+     res = isna(obj)
+     if isinstance(res, bool):
+         return not res
+     return ~res
+
+
+ notnull = notna
+
+
+ def array_equivalent(
+     left,
+     right,
+     strict_nan: bool = False,
+     dtype_equal: bool = False,
+ ) -> bool:
+     """
+     True if two arrays, left and right, have equal non-NaN elements, and NaNs
+     in corresponding locations. False otherwise. It is assumed that left and
+     right are NumPy arrays of the same dtype. The behavior of this function
+     (particularly with respect to NaNs) is not defined if the dtypes are
+     different.
+
+     Parameters
+     ----------
+     left, right : ndarrays
+     strict_nan : bool, default False
+         If True, consider NaN and None to be different.
+     dtype_equal : bool, default False
+         Whether `left` and `right` are known to have the same dtype
+         according to `is_dtype_equal`. Some methods like `BlockManager.equals`
+         require that the dtypes match. Setting this to ``True`` can improve
+         performance, but will give different results for arrays that are
+         equal but have different dtypes.
+
+     Returns
+     -------
+     b : bool
+         Returns True if the arrays are equivalent.
+
+     Examples
+     --------
+     >>> array_equivalent(
+     ...     np.array([1, 2, np.nan]),
+     ...     np.array([1, 2, np.nan]))
+     True
+     >>> array_equivalent(
+     ...     np.array([1, np.nan, 2]),
+     ...     np.array([1, 2, np.nan]))
+     False
+     """
+     left, right = np.asarray(left), np.asarray(right)
+
+     # shape compat
+     if left.shape != right.shape:
+         return False
+
+     if dtype_equal:
+         # fastpath when we require that the dtypes match (Block.equals)
+         if left.dtype.kind in "fc":
+             return _array_equivalent_float(left, right)
+         elif left.dtype.kind in "mM":
+             return _array_equivalent_datetimelike(left, right)
+         elif is_string_or_object_np_dtype(left.dtype):
+             # TODO: fastpath for pandas' StringDtype
+             return _array_equivalent_object(left, right, strict_nan)
+         else:
+             return np.array_equal(left, right)
+
+     # Slow path when we allow comparing different dtypes.
+     # Object arrays can contain None, NaN and NaT.
+     # string dtypes must come to this path for NumPy 1.7.1 compat
+     if left.dtype.kind in "OSU" or right.dtype.kind in "OSU":
+         # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]`
+         # or `in ("O", "S", "U")`
+         return _array_equivalent_object(left, right, strict_nan)
+
+     # NaNs can occur in float and complex arrays.
+     if left.dtype.kind in "fc":
+         if not (left.size and right.size):
+             return True
+         return ((left == right) | (isna(left) & isna(right))).all()
+
+     elif left.dtype.kind in "mM" or right.dtype.kind in "mM":
+         # datetime64, timedelta64, Period
+         if left.dtype != right.dtype:
+             return False
+
+         left = left.view("i8")
+         right = right.view("i8")
+
+     # if we have structured dtypes, compare first
+     if (
+         left.dtype.type is np.void or right.dtype.type is np.void
+     ) and left.dtype != right.dtype:
+         return False
+
+     return np.array_equal(left, right)
+
+
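The ``strict_nan`` flag is not exercised in the docstring examples; it controls whether different NA-like sentinels in matching slots count as equal. A hedged sketch, assuming the helper is importable from ``pandas.core.dtypes.missing`` (private API, subject to change):

import numpy as np
from pandas.core.dtypes.missing import array_equivalent

left = np.array([1, np.nan], dtype=object)
right = np.array([1, None], dtype=object)

# By default any pair of NA-like values in the same slot is equivalent.
print(array_equivalent(left, right))  # True

# strict_nan=True distinguishes np.nan from None (and NaT, pd.NA, ...).
print(array_equivalent(left, right, strict_nan=True))  # False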
+ def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool:
+     return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all())
+
+
+ def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray):
+     return np.array_equal(left.view("i8"), right.view("i8"))
+
+
+ def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool):
+     left = ensure_object(left)
+     right = ensure_object(right)
+
+     mask: npt.NDArray[np.bool_] | None = None
+     if strict_nan:
+         mask = isna(left) & isna(right)
+         if not mask.any():
+             mask = None
+
+     try:
+         if mask is None:
+             return lib.array_equivalent_object(left, right)
+         if not lib.array_equivalent_object(left[~mask], right[~mask]):
+             return False
+         left_remaining = left[mask]
+         right_remaining = right[mask]
+     except ValueError:
+         # can raise a ValueError if left and right cannot be
+         # compared (e.g. nested arrays)
+         left_remaining = left
+         right_remaining = right
+
+     for left_value, right_value in zip(left_remaining, right_remaining):
+         if left_value is NaT and right_value is not NaT:
+             return False
+
+         elif left_value is libmissing.NA and right_value is not libmissing.NA:
+             return False
+
+         elif isinstance(left_value, float) and np.isnan(left_value):
+             if not isinstance(right_value, float) or not np.isnan(right_value):
+                 return False
+         else:
+             with warnings.catch_warnings():
+                 # suppress numpy's "elementwise comparison failed"
+                 warnings.simplefilter("ignore", DeprecationWarning)
+                 try:
+                     if np.any(np.asarray(left_value != right_value)):
+                         return False
+                 except TypeError as err:
+                     if "boolean value of NA is ambiguous" in str(err):
+                         return False
+                     raise
+                 except ValueError:
+                     # numpy can raise a ValueError if left and right cannot be
+                     # compared (e.g. nested arrays)
+                     return False
+     return True
+
+
+ def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
+     """
+     ExtensionArray-compatible implementation of array_equivalent.
+     """
+     if left.dtype != right.dtype:
+         return False
+     elif isinstance(left, ABCExtensionArray):
+         return left.equals(right)
+     else:
+         return array_equivalent(left, right, dtype_equal=True)
+
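A short usage sketch for the ExtensionArray-aware wrapper (again importing from the private module, so treat as illustrative):

import pandas as pd
from pandas.core.dtypes.missing import array_equals

left = pd.array([1, None], dtype="Int64")
right = pd.array([1, None], dtype="Int64")

print(array_equals(left, right))  # True: delegates to left.equals(right)
print(array_equals(left, right.astype("Int32")))  # False: dtype mismatch short-circuits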
+
+ def infer_fill_value(val):
+     """
+     Infer the fill value for the nan/NaT from the provided
+     scalar/ndarray/list-like. If we are a NaT, return the correct dtyped
+     element to provide proper block construction.
+     """
+     if not is_list_like(val):
+         val = [val]
+     val = np.asarray(val)
+     if val.dtype.kind in "mM":
+         return np.array("NaT", dtype=val.dtype)
+     elif val.dtype == object:
+         dtype = lib.infer_dtype(ensure_object(val), skipna=False)
+         if dtype in ["datetime", "datetime64"]:
+             return np.array("NaT", dtype=DT64NS_DTYPE)
+         elif dtype in ["timedelta", "timedelta64"]:
+             return np.array("NaT", dtype=TD64NS_DTYPE)
+         return np.array(np.nan, dtype=object)
+     elif val.dtype.kind == "U":
+         return np.array(np.nan, dtype=val.dtype)
+     return np.nan
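What ``infer_fill_value`` returns for a few representative inputs (private helper; illustrative):

import numpy as np
from pandas.core.dtypes.missing import infer_fill_value

print(infer_fill_value(np.datetime64("2021-01-01")))  # NaT (datetime64 dtype preserved)
print(infer_fill_value(np.timedelta64(1, "s")))       # NaT (timedelta64 dtype preserved)
print(infer_fill_value(1.5))                          # nan (generic fallback)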
+
+
+ def construct_1d_array_from_inferred_fill_value(
+     value: object, length: int
+ ) -> ArrayLike:
+     # Find our empty_value dtype by constructing an array
+     # from our value and doing a .take on it
+     from pandas.core.algorithms import take_nd
+     from pandas.core.construction import sanitize_array
+     from pandas.core.indexes.base import Index
+
+     arr = sanitize_array(value, Index(range(1)), copy=False)
+     taker = -1 * np.ones(length, dtype=np.intp)
+     return take_nd(arr, taker)
+
+
+ def maybe_fill(arr: np.ndarray) -> np.ndarray:
+     """
+     Fill numpy.ndarray with NaN, unless we have an integer or boolean dtype.
+     """
+     if arr.dtype.kind not in "iub":
+         arr.fill(np.nan)
+     return arr
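``maybe_fill`` mutates in place and only skips dtypes that cannot hold NaN. A tiny sketch (private helper; illustrative):

import numpy as np
from pandas.core.dtypes.missing import maybe_fill

print(maybe_fill(np.empty(2, dtype=np.float64)))  # [nan nan]
print(maybe_fill(np.zeros(2, dtype=np.int64)))    # [0 0]: int/uint/bool left untouched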
+
+
+ def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
+     """
+     Return a dtype compat na value.
+
+     Parameters
+     ----------
+     dtype : np.dtype or ExtensionDtype
+     compat : bool, default True
+         If True, return a value that fits the dtype (0 for integer dtypes,
+         False for bool) rather than NaN.
+
+     Returns
+     -------
+     Scalar
+         The NA value appropriate for the dtype.
+
+     Examples
+     --------
+     >>> na_value_for_dtype(np.dtype('int64'))
+     0
+     >>> na_value_for_dtype(np.dtype('int64'), compat=False)
+     nan
+     >>> na_value_for_dtype(np.dtype('float64'))
+     nan
+     >>> na_value_for_dtype(np.dtype('bool'))
+     False
+     >>> na_value_for_dtype(np.dtype('datetime64[ns]'))
+     numpy.datetime64('NaT')
+     """
+     if isinstance(dtype, ExtensionDtype):
+         return dtype.na_value
+     elif dtype.kind in "mM":
+         unit = np.datetime_data(dtype)[0]
+         return dtype.type("NaT", unit)
+     elif dtype.kind == "f":
+         return np.nan
+     elif dtype.kind in "iu":
+         if compat:
+             return 0
+         return np.nan
+     elif dtype.kind == "b":
+         if compat:
+             return False
+         return np.nan
+     return np.nan
+
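For extension dtypes the function simply defers to the dtype's own ``na_value``, which the docstring examples above do not show. A hedged sketch (private helper; illustrative):

import pandas as pd
from pandas.core.dtypes.missing import na_value_for_dtype

print(na_value_for_dtype(pd.Int64Dtype()))        # <NA>: the masked dtype's own sentinel
print(na_value_for_dtype(pd.CategoricalDtype()))  # nan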
+
+ def remove_na_arraylike(arr: Series | Index | np.ndarray):
+     """
+     Return array-like containing only true/non-NaN values, possibly empty.
+     """
+     if isinstance(arr.dtype, ExtensionDtype):
+         return arr[notna(arr)]
+     else:
+         return arr[notna(np.asarray(arr))]
+
+
+ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
+     """
+     isna check that excludes incompatible dtypes
+
+     Parameters
+     ----------
+     obj : object
+     dtype : DtypeObj
+         The np.dtype or ExtensionDtype to validate ``obj`` against.
+
+     Returns
+     -------
+     bool
+     """
+     if not lib.is_scalar(obj) or not isna(obj):
+         return False
+     elif dtype.kind == "M":
+         if isinstance(dtype, np.dtype):
+             # i.e. not tzaware
+             return not isinstance(obj, (np.timedelta64, Decimal))
+         # we have to rule out tznaive dt64("NaT")
+         return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal))
+     elif dtype.kind == "m":
+         return not isinstance(obj, (np.datetime64, Decimal))
+     elif dtype.kind in "iufc":
+         # Numeric
+         return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))
+     elif dtype.kind == "b":
+         # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype)
+         return lib.is_float(obj) or obj is None or obj is libmissing.NA
+
+     elif dtype == _dtype_str:
+         # numpy string dtypes to avoid float np.nan
+         return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float))
+
+     elif dtype == _dtype_object:
+         # This is needed for Categorical, but is kind of weird
+         return True
+
+     elif isinstance(dtype, PeriodDtype):
+         return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
+
+     elif isinstance(dtype, IntervalDtype):
+         return lib.is_float(obj) or obj is None or obj is libmissing.NA
+
+     elif isinstance(dtype, CategoricalDtype):
+         return is_valid_na_for_dtype(obj, dtype.categories.dtype)
+
+     # fallback, default to allowing NaN, None, NA, NaT
+     return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
+
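The point of the dtype-specific branches is to reject NA sentinels of the *wrong* resolution, e.g. a timedelta NaT is not a valid NA for a datetime64 column. A sketch (private helper; illustrative):

import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import is_valid_na_for_dtype

dt64 = np.dtype("datetime64[ns]")
print(is_valid_na_for_dtype(pd.NaT, dt64))                 # True
print(is_valid_na_for_dtype(np.timedelta64("NaT"), dt64))  # False: wrong flavour of NaT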
+
+ def isna_all(arr: ArrayLike) -> bool:
+     """
+     Optimized equivalent to isna(arr).all()
+     """
+     total_len = len(arr)
+
+     # Usually it's enough to check only a small fraction of values to see
+     # if a block is NOT null; chunking should help in such cases.
+     # parameters 1000 and 40 were chosen arbitrarily
+     chunk_len = max(total_len // 40, 1000)
+
+     dtype = arr.dtype
+     if lib.is_np_dtype(dtype, "f"):
+         checker = nan_checker
+
+     elif (lib.is_np_dtype(dtype, "mM")) or isinstance(
+         dtype, (DatetimeTZDtype, PeriodDtype)
+     ):
+         # error: Incompatible types in assignment (expression has type
+         # "Callable[[Any], Any]", variable has type "ufunc")
+         checker = lambda x: np.asarray(x.view("i8")) == iNaT  # type: ignore[assignment]
+
+     else:
+         # error: Incompatible types in assignment (expression has type "Callable[[Any],
+         # Any]", variable has type "ufunc")
+         checker = lambda x: _isna_array(  # type: ignore[assignment]
+             x, inf_as_na=INF_AS_NA
+         )
+
+     return all(
+         checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)
+     )
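A closing sketch of the chunked check (private helper; illustrative). With 100_000 elements the chunk length is max(100_000 // 40, 1000) == 2_500, so a non-null value early in the array lets the generator short-circuit after the first chunk:

import numpy as np
from pandas.core.dtypes.missing import isna_all

arr = np.full(100_000, np.nan)
print(isna_all(arr))  # True: every 2_500-element chunk is all-NaN

arr[0] = 1.0
print(isna_all(arr))  # False: the first chunk already fails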