applied-ai-018 committed
Commit 06c6650 · verified · 1 Parent(s): 63339f2

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_208_mp_rank_03_optim_states.pt +3 -0
  2. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_91_mp_rank_01_optim_states.pt +3 -0
  3. venv/lib/python3.10/site-packages/pandas/core/__init__.py +0 -0
  4. venv/lib/python3.10/site-packages/pandas/core/accessor.py +340 -0
  5. venv/lib/python3.10/site-packages/pandas/core/algorithms.py +1747 -0
  6. venv/lib/python3.10/site-packages/pandas/core/api.py +140 -0
  7. venv/lib/python3.10/site-packages/pandas/core/apply.py +2062 -0
  8. venv/lib/python3.10/site-packages/pandas/core/arraylike.py +530 -0
  9. venv/lib/python3.10/site-packages/pandas/core/arrays/__init__.py +43 -0
  10. venv/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py +84 -0
  11. venv/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py +547 -0
  12. venv/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py +207 -0
  13. venv/lib/python3.10/site-packages/pandas/core/arrays/_utils.py +63 -0
  14. venv/lib/python3.10/site-packages/pandas/core/arrays/base.py +2588 -0
  15. venv/lib/python3.10/site-packages/pandas/core/arrays/categorical.py +0 -0
  16. venv/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py +2556 -0
  17. venv/lib/python3.10/site-packages/pandas/core/arrays/datetimes.py +2820 -0
  18. venv/lib/python3.10/site-packages/pandas/core/arrays/floating.py +173 -0
  19. venv/lib/python3.10/site-packages/pandas/core/arrays/interval.py +1917 -0
  20. venv/lib/python3.10/site-packages/pandas/core/arrays/numeric.py +286 -0
  21. venv/lib/python3.10/site-packages/pandas/core/arrays/period.py +1313 -0
  22. venv/lib/python3.10/site-packages/pandas/core/arrays/string_.py +657 -0
  23. venv/lib/python3.10/site-packages/pandas/core/arrays/string_arrow.py +715 -0
  24. venv/lib/python3.10/site-packages/pandas/core/arrays/timedeltas.py +1177 -0
  25. venv/lib/python3.10/site-packages/pandas/core/base.py +1391 -0
  26. venv/lib/python3.10/site-packages/pandas/core/common.py +657 -0
  27. venv/lib/python3.10/site-packages/pandas/core/config_init.py +924 -0
  28. venv/lib/python3.10/site-packages/pandas/core/construction.py +824 -0
  29. venv/lib/python3.10/site-packages/pandas/core/flags.py +117 -0
  30. venv/lib/python3.10/site-packages/pandas/core/frame.py +0 -0
  31. venv/lib/python3.10/site-packages/pandas/core/generic.py +0 -0
  32. venv/lib/python3.10/site-packages/pandas/core/indexing.py +0 -0
  33. venv/lib/python3.10/site-packages/pandas/core/interchange/__init__.py +0 -0
  34. venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/pandas/core/interchange/buffer.py +136 -0
  42. venv/lib/python3.10/site-packages/pandas/core/interchange/column.py +461 -0
  43. venv/lib/python3.10/site-packages/pandas/core/interchange/dataframe.py +113 -0
  44. venv/lib/python3.10/site-packages/pandas/core/interchange/dataframe_protocol.py +465 -0
  45. venv/lib/python3.10/site-packages/pandas/core/interchange/from_dataframe.py +526 -0
  46. venv/lib/python3.10/site-packages/pandas/core/interchange/utils.py +178 -0
  47. venv/lib/python3.10/site-packages/pandas/core/missing.py +1158 -0
  48. venv/lib/python3.10/site-packages/pandas/core/nanops.py +1748 -0
  49. venv/lib/python3.10/site-packages/pandas/core/resample.py +2920 -0
  50. venv/lib/python3.10/site-packages/pandas/core/roperator.py +62 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_208_mp_rank_03_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d46f2968641dc316fda992581d60f8acbaca1974d4d3c257cdfb372e7b9c7d21
+ size 41830340
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_91_mp_rank_01_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b385e155afab2b6f8a8162e22db999498ec26987f12cda6fbcd488fb772d42f
+ size 41830202
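
For context, the two checkpoint entries above are Git LFS pointer files rather than the binary optimizer-state shards themselves: each pointer records the spec version, the SHA-256 object id, and the size in bytes of the real file stored on the LFS server. A minimal sketch of reading such a pointer in Python (the parse_lfs_pointer helper below is illustrative only, not part of this commit):

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict[str, str]:
    # Illustrative helper, not part of this commit.
    # A pointer file is a handful of "key value" lines, e.g.
    #   version https://git-lfs.github.com/spec/v1
    #   oid sha256:<hex digest>
    #   size <bytes>
    fields: dict[str, str] = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields

# The first shard above would yield, for example:
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:d46f2968...", "size": "41830340"}
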
venv/lib/python3.10/site-packages/pandas/core/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/core/accessor.py ADDED
@@ -0,0 +1,340 @@
+ """
+
+ accessor.py contains base classes for implementing accessor properties
+ that can be mixed into or pinned onto other pandas classes.
+
+ """
+ from __future__ import annotations
+
+ from typing import (
+     Callable,
+     final,
+ )
+ import warnings
+
+ from pandas.util._decorators import doc
+ from pandas.util._exceptions import find_stack_level
+
+
+ class DirNamesMixin:
+     _accessors: set[str] = set()
+     _hidden_attrs: frozenset[str] = frozenset()
+
+     @final
+     def _dir_deletions(self) -> set[str]:
+         """
+         Delete unwanted __dir__ for this object.
+         """
+         return self._accessors | self._hidden_attrs
+
+     def _dir_additions(self) -> set[str]:
+         """
+         Add additional __dir__ for this object.
+         """
+         return {accessor for accessor in self._accessors if hasattr(self, accessor)}
+
+     def __dir__(self) -> list[str]:
+         """
+         Provide method name lookup and completion.
+
+         Notes
+         -----
+         Only provide 'public' methods.
+         """
+         rv = set(super().__dir__())
+         rv = (rv - self._dir_deletions()) | self._dir_additions()
+         return sorted(rv)
+
+
+ class PandasDelegate:
+     """
+     Abstract base class for delegating methods/properties.
+     """
+
+     def _delegate_property_get(self, name: str, *args, **kwargs):
+         raise TypeError(f"You cannot access the property {name}")
+
+     def _delegate_property_set(self, name: str, value, *args, **kwargs):
+         raise TypeError(f"The property {name} cannot be set")
+
+     def _delegate_method(self, name: str, *args, **kwargs):
+         raise TypeError(f"You cannot call method {name}")
+
+     @classmethod
+     def _add_delegate_accessors(
+         cls,
+         delegate,
+         accessors: list[str],
+         typ: str,
+         overwrite: bool = False,
+         accessor_mapping: Callable[[str], str] = lambda x: x,
+         raise_on_missing: bool = True,
+     ) -> None:
+         """
+         Add accessors to cls from the delegate class.
+
+         Parameters
+         ----------
+         cls
+             Class to add the methods/properties to.
+         delegate
+             Class to get methods/properties and doc-strings.
+         accessors : list of str
+             List of accessors to add.
+         typ : {'property', 'method'}
+         overwrite : bool, default False
+             Overwrite the method/property in the target class if it exists.
+         accessor_mapping: Callable, default lambda x: x
+             Callable to map the delegate's function to the cls' function.
+         raise_on_missing: bool, default True
+             Raise if an accessor does not exist on delegate.
+             False skips the missing accessor.
+         """
+
+         def _create_delegator_property(name: str):
+             def _getter(self):
+                 return self._delegate_property_get(name)
+
+             def _setter(self, new_values):
+                 return self._delegate_property_set(name, new_values)
+
+             _getter.__name__ = name
+             _setter.__name__ = name
+
+             return property(
+                 fget=_getter,
+                 fset=_setter,
+                 doc=getattr(delegate, accessor_mapping(name)).__doc__,
+             )
+
+         def _create_delegator_method(name: str):
+             def f(self, *args, **kwargs):
+                 return self._delegate_method(name, *args, **kwargs)
+
+             f.__name__ = name
+             f.__doc__ = getattr(delegate, accessor_mapping(name)).__doc__
+
+             return f
+
+         for name in accessors:
+             if (
+                 not raise_on_missing
+                 and getattr(delegate, accessor_mapping(name), None) is None
+             ):
+                 continue
+
+             if typ == "property":
+                 f = _create_delegator_property(name)
+             else:
+                 f = _create_delegator_method(name)
+
+             # don't overwrite existing methods/properties
+             if overwrite or not hasattr(cls, name):
+                 setattr(cls, name, f)
+
+
+ def delegate_names(
+     delegate,
+     accessors: list[str],
+     typ: str,
+     overwrite: bool = False,
+     accessor_mapping: Callable[[str], str] = lambda x: x,
+     raise_on_missing: bool = True,
+ ):
+     """
+     Add delegated names to a class using a class decorator. This provides
+     an alternative usage to directly calling `_add_delegate_accessors`
+     below a class definition.
+
+     Parameters
+     ----------
+     delegate : object
+         The class to get methods/properties & doc-strings.
+     accessors : Sequence[str]
+         List of accessor to add.
+     typ : {'property', 'method'}
+     overwrite : bool, default False
+         Overwrite the method/property in the target class if it exists.
+     accessor_mapping: Callable, default lambda x: x
+         Callable to map the delegate's function to the cls' function.
+     raise_on_missing: bool, default True
+         Raise if an accessor does not exist on delegate.
+         False skips the missing accessor.
+
+     Returns
+     -------
+     callable
+         A class decorator.
+
+     Examples
+     --------
+     @delegate_names(Categorical, ["categories", "ordered"], "property")
+     class CategoricalAccessor(PandasDelegate):
+         [...]
+     """
+
+     def add_delegate_accessors(cls):
+         cls._add_delegate_accessors(
+             delegate,
+             accessors,
+             typ,
+             overwrite=overwrite,
+             accessor_mapping=accessor_mapping,
+             raise_on_missing=raise_on_missing,
+         )
+         return cls
+
+     return add_delegate_accessors
+
+
+ # Ported with modifications from xarray; licence at LICENSES/XARRAY_LICENSE
+ # https://github.com/pydata/xarray/blob/master/xarray/core/extensions.py
+ # 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors
+ # 2. We use a UserWarning instead of a custom Warning
+
+
+ class CachedAccessor:
+     """
+     Custom property-like object.
+
+     A descriptor for caching accessors.
+
+     Parameters
+     ----------
+     name : str
+         Namespace that will be accessed under, e.g. ``df.foo``.
+     accessor : cls
+         Class with the extension methods.
+
+     Notes
+     -----
+     For accessor, The class's __init__ method assumes that one of
+     ``Series``, ``DataFrame`` or ``Index`` as the
+     single argument ``data``.
+     """
+
+     def __init__(self, name: str, accessor) -> None:
+         self._name = name
+         self._accessor = accessor
+
+     def __get__(self, obj, cls):
+         if obj is None:
+             # we're accessing the attribute of the class, i.e., Dataset.geo
+             return self._accessor
+         accessor_obj = self._accessor(obj)
+         # Replace the property with the accessor object. Inspired by:
+         # https://www.pydanny.com/cached-property.html
+         # We need to use object.__setattr__ because we overwrite __setattr__ on
+         # NDFrame
+         object.__setattr__(obj, self._name, accessor_obj)
+         return accessor_obj
+
+
+ @doc(klass="", others="")
+ def _register_accessor(name: str, cls):
+     """
+     Register a custom accessor on {klass} objects.
+
+     Parameters
+     ----------
+     name : str
+         Name under which the accessor should be registered. A warning is issued
+         if this name conflicts with a preexisting attribute.
+
+     Returns
+     -------
+     callable
+         A class decorator.
+
+     See Also
+     --------
+     register_dataframe_accessor : Register a custom accessor on DataFrame objects.
+     register_series_accessor : Register a custom accessor on Series objects.
+     register_index_accessor : Register a custom accessor on Index objects.
+
+     Notes
+     -----
+     When accessed, your accessor will be initialized with the pandas object
+     the user is interacting with. So the signature must be
+
+     .. code-block:: python
+
+         def __init__(self, pandas_object):  # noqa: E999
+             ...
+
+     For consistency with pandas methods, you should raise an ``AttributeError``
+     if the data passed to your accessor has an incorrect dtype.
+
+     >>> pd.Series(['a', 'b']).dt
+     Traceback (most recent call last):
+     ...
+     AttributeError: Can only use .dt accessor with datetimelike values
+
+     Examples
+     --------
+     In your library code::
+
+         import pandas as pd
+
+         @pd.api.extensions.register_dataframe_accessor("geo")
+         class GeoAccessor:
+             def __init__(self, pandas_obj):
+                 self._obj = pandas_obj
+
+             @property
+             def center(self):
+                 # return the geographic center point of this DataFrame
+                 lat = self._obj.latitude
+                 lon = self._obj.longitude
+                 return (float(lon.mean()), float(lat.mean()))
+
+             def plot(self):
+                 # plot this array's data on a map, e.g., using Cartopy
+                 pass
+
+     Back in an interactive IPython session:
+
+     .. code-block:: ipython
+
+         In [1]: ds = pd.DataFrame({{"longitude": np.linspace(0, 10),
+            ...:                    "latitude": np.linspace(0, 20)}})
+         In [2]: ds.geo.center
+         Out[2]: (5.0, 10.0)
+         In [3]: ds.geo.plot()  # plots data on a map
+     """
+
+     def decorator(accessor):
+         if hasattr(cls, name):
+             warnings.warn(
+                 f"registration of accessor {repr(accessor)} under name "
+                 f"{repr(name)} for type {repr(cls)} is overriding a preexisting "
+                 f"attribute with the same name.",
+                 UserWarning,
+                 stacklevel=find_stack_level(),
+             )
+         setattr(cls, name, CachedAccessor(name, accessor))
+         cls._accessors.add(name)
+         return accessor
+
+     return decorator
+
+
+ @doc(_register_accessor, klass="DataFrame")
+ def register_dataframe_accessor(name: str):
+     from pandas import DataFrame
+
+     return _register_accessor(name, DataFrame)
+
+
+ @doc(_register_accessor, klass="Series")
+ def register_series_accessor(name: str):
+     from pandas import Series
+
+     return _register_accessor(name, Series)
+
+
+ @doc(_register_accessor, klass="Index")
+ def register_index_accessor(name: str):
+     from pandas import Index
+
+     return _register_accessor(name, Index)
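
The accessor.py module added above is the machinery behind pandas' public pd.api.extensions.register_dataframe_accessor, register_series_accessor, and register_index_accessor: the returned decorator wraps a user class in a CachedAccessor descriptor and attaches it to the target pandas class. A minimal usage sketch against that public API (the "upper" namespace below is a made-up example, not something defined in this commit):

import pandas as pd

@pd.api.extensions.register_series_accessor("upper")
class UpperAccessor:
    # Made-up example accessor; the accessor is constructed with the Series
    # it is accessed on, matching the __init__ contract described above.
    def __init__(self, pandas_obj: pd.Series) -> None:
        self._obj = pandas_obj

    def shout(self) -> pd.Series:
        # Upper-case the string values via the built-in .str accessor.
        return self._obj.str.upper()

s = pd.Series(["a", "b"])
print(s.upper.shout())  # -> "A", "B"

Because CachedAccessor.__get__ stores the constructed accessor back onto the object with object.__setattr__, repeated attribute access on the same Series reuses the cached accessor instance.
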
venv/lib/python3.10/site-packages/pandas/core/algorithms.py ADDED
@@ -0,0 +1,1747 @@
1
+ """
2
+ Generic data algorithms. This module is experimental at the moment and not
3
+ intended for public consumption
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import decimal
8
+ import operator
9
+ from textwrap import dedent
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Literal,
13
+ cast,
14
+ )
15
+ import warnings
16
+
17
+ import numpy as np
18
+
19
+ from pandas._libs import (
20
+ algos,
21
+ hashtable as htable,
22
+ iNaT,
23
+ lib,
24
+ )
25
+ from pandas._typing import (
26
+ AnyArrayLike,
27
+ ArrayLike,
28
+ AxisInt,
29
+ DtypeObj,
30
+ TakeIndexer,
31
+ npt,
32
+ )
33
+ from pandas.util._decorators import doc
34
+ from pandas.util._exceptions import find_stack_level
35
+
36
+ from pandas.core.dtypes.cast import (
37
+ construct_1d_object_array_from_listlike,
38
+ np_find_common_type,
39
+ )
40
+ from pandas.core.dtypes.common import (
41
+ ensure_float64,
42
+ ensure_object,
43
+ ensure_platform_int,
44
+ is_array_like,
45
+ is_bool_dtype,
46
+ is_complex_dtype,
47
+ is_dict_like,
48
+ is_extension_array_dtype,
49
+ is_float_dtype,
50
+ is_integer,
51
+ is_integer_dtype,
52
+ is_list_like,
53
+ is_object_dtype,
54
+ is_signed_integer_dtype,
55
+ needs_i8_conversion,
56
+ )
57
+ from pandas.core.dtypes.concat import concat_compat
58
+ from pandas.core.dtypes.dtypes import (
59
+ BaseMaskedDtype,
60
+ CategoricalDtype,
61
+ ExtensionDtype,
62
+ NumpyEADtype,
63
+ )
64
+ from pandas.core.dtypes.generic import (
65
+ ABCDatetimeArray,
66
+ ABCExtensionArray,
67
+ ABCIndex,
68
+ ABCMultiIndex,
69
+ ABCSeries,
70
+ ABCTimedeltaArray,
71
+ )
72
+ from pandas.core.dtypes.missing import (
73
+ isna,
74
+ na_value_for_dtype,
75
+ )
76
+
77
+ from pandas.core.array_algos.take import take_nd
78
+ from pandas.core.construction import (
79
+ array as pd_array,
80
+ ensure_wrapped_if_datetimelike,
81
+ extract_array,
82
+ )
83
+ from pandas.core.indexers import validate_indices
84
+
85
+ if TYPE_CHECKING:
86
+ from pandas._typing import (
87
+ ListLike,
88
+ NumpySorter,
89
+ NumpyValueArrayLike,
90
+ )
91
+
92
+ from pandas import (
93
+ Categorical,
94
+ Index,
95
+ Series,
96
+ )
97
+ from pandas.core.arrays import (
98
+ BaseMaskedArray,
99
+ ExtensionArray,
100
+ )
101
+
102
+
103
+ # --------------- #
104
+ # dtype access #
105
+ # --------------- #
106
+ def _ensure_data(values: ArrayLike) -> np.ndarray:
107
+ """
108
+ routine to ensure that our data is of the correct
109
+ input dtype for lower-level routines
110
+
111
+ This will coerce:
112
+ - ints -> int64
113
+ - uint -> uint64
114
+ - bool -> uint8
115
+ - datetimelike -> i8
116
+ - datetime64tz -> i8 (in local tz)
117
+ - categorical -> codes
118
+
119
+ Parameters
120
+ ----------
121
+ values : np.ndarray or ExtensionArray
122
+
123
+ Returns
124
+ -------
125
+ np.ndarray
126
+ """
127
+
128
+ if not isinstance(values, ABCMultiIndex):
129
+ # extract_array would raise
130
+ values = extract_array(values, extract_numpy=True)
131
+
132
+ if is_object_dtype(values.dtype):
133
+ return ensure_object(np.asarray(values))
134
+
135
+ elif isinstance(values.dtype, BaseMaskedDtype):
136
+ # i.e. BooleanArray, FloatingArray, IntegerArray
137
+ values = cast("BaseMaskedArray", values)
138
+ if not values._hasna:
139
+ # No pd.NAs -> We can avoid an object-dtype cast (and copy) GH#41816
140
+ # recurse to avoid re-implementing logic for eg bool->uint8
141
+ return _ensure_data(values._data)
142
+ return np.asarray(values)
143
+
144
+ elif isinstance(values.dtype, CategoricalDtype):
145
+ # NB: cases that go through here should NOT be using _reconstruct_data
146
+ # on the back-end.
147
+ values = cast("Categorical", values)
148
+ return values.codes
149
+
150
+ elif is_bool_dtype(values.dtype):
151
+ if isinstance(values, np.ndarray):
152
+ # i.e. actually dtype == np.dtype("bool")
153
+ return np.asarray(values).view("uint8")
154
+ else:
155
+ # e.g. Sparse[bool, False] # TODO: no test cases get here
156
+ return np.asarray(values).astype("uint8", copy=False)
157
+
158
+ elif is_integer_dtype(values.dtype):
159
+ return np.asarray(values)
160
+
161
+ elif is_float_dtype(values.dtype):
162
+ # Note: checking `values.dtype == "float128"` raises on Windows and 32bit
163
+ # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]"
164
+ # has no attribute "itemsize"
165
+ if values.dtype.itemsize in [2, 12, 16]: # type: ignore[union-attr]
166
+ # we dont (yet) have float128 hashtable support
167
+ return ensure_float64(values)
168
+ return np.asarray(values)
169
+
170
+ elif is_complex_dtype(values.dtype):
171
+ return cast(np.ndarray, values)
172
+
173
+ # datetimelike
174
+ elif needs_i8_conversion(values.dtype):
175
+ npvalues = values.view("i8")
176
+ npvalues = cast(np.ndarray, npvalues)
177
+ return npvalues
178
+
179
+ # we have failed, return object
180
+ values = np.asarray(values, dtype=object)
181
+ return ensure_object(values)
182
+
183
+
184
+ def _reconstruct_data(
185
+ values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
186
+ ) -> ArrayLike:
187
+ """
188
+ reverse of _ensure_data
189
+
190
+ Parameters
191
+ ----------
192
+ values : np.ndarray or ExtensionArray
193
+ dtype : np.dtype or ExtensionDtype
194
+ original : AnyArrayLike
195
+
196
+ Returns
197
+ -------
198
+ ExtensionArray or np.ndarray
199
+ """
200
+ if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
201
+ # Catch DatetimeArray/TimedeltaArray
202
+ return values
203
+
204
+ if not isinstance(dtype, np.dtype):
205
+ # i.e. ExtensionDtype; note we have ruled out above the possibility
206
+ # that values.dtype == dtype
207
+ cls = dtype.construct_array_type()
208
+
209
+ values = cls._from_sequence(values, dtype=dtype)
210
+
211
+ else:
212
+ values = values.astype(dtype, copy=False)
213
+
214
+ return values
215
+
216
+
217
+ def _ensure_arraylike(values, func_name: str) -> ArrayLike:
218
+ """
219
+ ensure that we are arraylike if not already
220
+ """
221
+ if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
222
+ # GH#52986
223
+ if func_name != "isin-targets":
224
+ # Make an exception for the comps argument in isin.
225
+ warnings.warn(
226
+ f"{func_name} with argument that is not not a Series, Index, "
227
+ "ExtensionArray, or np.ndarray is deprecated and will raise in a "
228
+ "future version.",
229
+ FutureWarning,
230
+ stacklevel=find_stack_level(),
231
+ )
232
+
233
+ inferred = lib.infer_dtype(values, skipna=False)
234
+ if inferred in ["mixed", "string", "mixed-integer"]:
235
+ # "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160
236
+ if isinstance(values, tuple):
237
+ values = list(values)
238
+ values = construct_1d_object_array_from_listlike(values)
239
+ else:
240
+ values = np.asarray(values)
241
+ return values
242
+
243
+
244
+ _hashtables = {
245
+ "complex128": htable.Complex128HashTable,
246
+ "complex64": htable.Complex64HashTable,
247
+ "float64": htable.Float64HashTable,
248
+ "float32": htable.Float32HashTable,
249
+ "uint64": htable.UInt64HashTable,
250
+ "uint32": htable.UInt32HashTable,
251
+ "uint16": htable.UInt16HashTable,
252
+ "uint8": htable.UInt8HashTable,
253
+ "int64": htable.Int64HashTable,
254
+ "int32": htable.Int32HashTable,
255
+ "int16": htable.Int16HashTable,
256
+ "int8": htable.Int8HashTable,
257
+ "string": htable.StringHashTable,
258
+ "object": htable.PyObjectHashTable,
259
+ }
260
+
261
+
262
+ def _get_hashtable_algo(values: np.ndarray):
263
+ """
264
+ Parameters
265
+ ----------
266
+ values : np.ndarray
267
+
268
+ Returns
269
+ -------
270
+ htable : HashTable subclass
271
+ values : ndarray
272
+ """
273
+ values = _ensure_data(values)
274
+
275
+ ndtype = _check_object_for_strings(values)
276
+ hashtable = _hashtables[ndtype]
277
+ return hashtable, values
278
+
279
+
280
+ def _check_object_for_strings(values: np.ndarray) -> str:
281
+ """
282
+ Check if we can use string hashtable instead of object hashtable.
283
+
284
+ Parameters
285
+ ----------
286
+ values : ndarray
287
+
288
+ Returns
289
+ -------
290
+ str
291
+ """
292
+ ndtype = values.dtype.name
293
+ if ndtype == "object":
294
+ # it's cheaper to use a String Hash Table than Object; we infer
295
+ # including nulls because that is the only difference between
296
+ # StringHashTable and ObjectHashtable
297
+ if lib.is_string_array(values, skipna=False):
298
+ ndtype = "string"
299
+ return ndtype
300
+
301
+
302
+ # --------------- #
303
+ # top-level algos #
304
+ # --------------- #
305
+
306
+
307
+ def unique(values):
308
+ """
309
+ Return unique values based on a hash table.
310
+
311
+ Uniques are returned in order of appearance. This does NOT sort.
312
+
313
+ Significantly faster than numpy.unique for long enough sequences.
314
+ Includes NA values.
315
+
316
+ Parameters
317
+ ----------
318
+ values : 1d array-like
319
+
320
+ Returns
321
+ -------
322
+ numpy.ndarray or ExtensionArray
323
+
324
+ The return can be:
325
+
326
+ * Index : when the input is an Index
327
+ * Categorical : when the input is a Categorical dtype
328
+ * ndarray : when the input is a Series/ndarray
329
+
330
+ Return numpy.ndarray or ExtensionArray.
331
+
332
+ See Also
333
+ --------
334
+ Index.unique : Return unique values from an Index.
335
+ Series.unique : Return unique values of Series object.
336
+
337
+ Examples
338
+ --------
339
+ >>> pd.unique(pd.Series([2, 1, 3, 3]))
340
+ array([2, 1, 3])
341
+
342
+ >>> pd.unique(pd.Series([2] + [1] * 5))
343
+ array([2, 1])
344
+
345
+ >>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
346
+ array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
347
+
348
+ >>> pd.unique(
349
+ ... pd.Series(
350
+ ... [
351
+ ... pd.Timestamp("20160101", tz="US/Eastern"),
352
+ ... pd.Timestamp("20160101", tz="US/Eastern"),
353
+ ... ]
354
+ ... )
355
+ ... )
356
+ <DatetimeArray>
357
+ ['2016-01-01 00:00:00-05:00']
358
+ Length: 1, dtype: datetime64[ns, US/Eastern]
359
+
360
+ >>> pd.unique(
361
+ ... pd.Index(
362
+ ... [
363
+ ... pd.Timestamp("20160101", tz="US/Eastern"),
364
+ ... pd.Timestamp("20160101", tz="US/Eastern"),
365
+ ... ]
366
+ ... )
367
+ ... )
368
+ DatetimeIndex(['2016-01-01 00:00:00-05:00'],
369
+ dtype='datetime64[ns, US/Eastern]',
370
+ freq=None)
371
+
372
+ >>> pd.unique(np.array(list("baabc"), dtype="O"))
373
+ array(['b', 'a', 'c'], dtype=object)
374
+
375
+ An unordered Categorical will return categories in the
376
+ order of appearance.
377
+
378
+ >>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
379
+ ['b', 'a', 'c']
380
+ Categories (3, object): ['a', 'b', 'c']
381
+
382
+ >>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
383
+ ['b', 'a', 'c']
384
+ Categories (3, object): ['a', 'b', 'c']
385
+
386
+ An ordered Categorical preserves the category ordering.
387
+
388
+ >>> pd.unique(
389
+ ... pd.Series(
390
+ ... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
391
+ ... )
392
+ ... )
393
+ ['b', 'a', 'c']
394
+ Categories (3, object): ['a' < 'b' < 'c']
395
+
396
+ An array of tuples
397
+
398
+ >>> pd.unique(pd.Series([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]).values)
399
+ array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
400
+ """
401
+ return unique_with_mask(values)
402
+
403
+
404
+ def nunique_ints(values: ArrayLike) -> int:
405
+ """
406
+ Return the number of unique values for integer array-likes.
407
+
408
+ Significantly faster than pandas.unique for long enough sequences.
409
+ No checks are done to ensure input is integral.
410
+
411
+ Parameters
412
+ ----------
413
+ values : 1d array-like
414
+
415
+ Returns
416
+ -------
417
+ int : The number of unique values in ``values``
418
+ """
419
+ if len(values) == 0:
420
+ return 0
421
+ values = _ensure_data(values)
422
+ # bincount requires intp
423
+ result = (np.bincount(values.ravel().astype("intp")) != 0).sum()
424
+ return result
425
+
426
+
427
+ def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None = None):
428
+ """See algorithms.unique for docs. Takes a mask for masked arrays."""
429
+ values = _ensure_arraylike(values, func_name="unique")
430
+
431
+ if isinstance(values.dtype, ExtensionDtype):
432
+ # Dispatch to extension dtype's unique.
433
+ return values.unique()
434
+
435
+ original = values
436
+ hashtable, values = _get_hashtable_algo(values)
437
+
438
+ table = hashtable(len(values))
439
+ if mask is None:
440
+ uniques = table.unique(values)
441
+ uniques = _reconstruct_data(uniques, original.dtype, original)
442
+ return uniques
443
+
444
+ else:
445
+ uniques, mask = table.unique(values, mask=mask)
446
+ uniques = _reconstruct_data(uniques, original.dtype, original)
447
+ assert mask is not None # for mypy
448
+ return uniques, mask.astype("bool")
449
+
450
+
451
+ unique1d = unique
452
+
453
+
454
+ _MINIMUM_COMP_ARR_LEN = 1_000_000
455
+
456
+
457
+ def isin(comps: ListLike, values: ListLike) -> npt.NDArray[np.bool_]:
458
+ """
459
+ Compute the isin boolean array.
460
+
461
+ Parameters
462
+ ----------
463
+ comps : list-like
464
+ values : list-like
465
+
466
+ Returns
467
+ -------
468
+ ndarray[bool]
469
+ Same length as `comps`.
470
+ """
471
+ if not is_list_like(comps):
472
+ raise TypeError(
473
+ "only list-like objects are allowed to be passed "
474
+ f"to isin(), you passed a `{type(comps).__name__}`"
475
+ )
476
+ if not is_list_like(values):
477
+ raise TypeError(
478
+ "only list-like objects are allowed to be passed "
479
+ f"to isin(), you passed a `{type(values).__name__}`"
480
+ )
481
+
482
+ if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
483
+ orig_values = list(values)
484
+ values = _ensure_arraylike(orig_values, func_name="isin-targets")
485
+
486
+ if (
487
+ len(values) > 0
488
+ and values.dtype.kind in "iufcb"
489
+ and not is_signed_integer_dtype(comps)
490
+ ):
491
+ # GH#46485 Use object to avoid upcast to float64 later
492
+ # TODO: Share with _find_common_type_compat
493
+ values = construct_1d_object_array_from_listlike(orig_values)
494
+
495
+ elif isinstance(values, ABCMultiIndex):
496
+ # Avoid raising in extract_array
497
+ values = np.array(values)
498
+ else:
499
+ values = extract_array(values, extract_numpy=True, extract_range=True)
500
+
501
+ comps_array = _ensure_arraylike(comps, func_name="isin")
502
+ comps_array = extract_array(comps_array, extract_numpy=True)
503
+ if not isinstance(comps_array, np.ndarray):
504
+ # i.e. Extension Array
505
+ return comps_array.isin(values)
506
+
507
+ elif needs_i8_conversion(comps_array.dtype):
508
+ # Dispatch to DatetimeLikeArrayMixin.isin
509
+ return pd_array(comps_array).isin(values)
510
+ elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps_array.dtype):
511
+ # e.g. comps_array are integers and values are datetime64s
512
+ return np.zeros(comps_array.shape, dtype=bool)
513
+ # TODO: not quite right ... Sparse/Categorical
514
+ elif needs_i8_conversion(values.dtype):
515
+ return isin(comps_array, values.astype(object))
516
+
517
+ elif isinstance(values.dtype, ExtensionDtype):
518
+ return isin(np.asarray(comps_array), np.asarray(values))
519
+
520
+ # GH16012
521
+ # Ensure np.isin doesn't get object types or it *may* throw an exception
522
+ # Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
523
+ # isin is faster for small sizes
524
+ if (
525
+ len(comps_array) > _MINIMUM_COMP_ARR_LEN
526
+ and len(values) <= 26
527
+ and comps_array.dtype != object
528
+ ):
529
+ # If the values include nan we need to check for nan explicitly
530
+ # since np.nan it not equal to np.nan
531
+ if isna(values).any():
532
+
533
+ def f(c, v):
534
+ return np.logical_or(np.isin(c, v).ravel(), np.isnan(c))
535
+
536
+ else:
537
+ f = lambda a, b: np.isin(a, b).ravel()
538
+
539
+ else:
540
+ common = np_find_common_type(values.dtype, comps_array.dtype)
541
+ values = values.astype(common, copy=False)
542
+ comps_array = comps_array.astype(common, copy=False)
543
+ f = htable.ismember
544
+
545
+ return f(comps_array, values)
546
+
547
+
548
+ def factorize_array(
549
+ values: np.ndarray,
550
+ use_na_sentinel: bool = True,
551
+ size_hint: int | None = None,
552
+ na_value: object = None,
553
+ mask: npt.NDArray[np.bool_] | None = None,
554
+ ) -> tuple[npt.NDArray[np.intp], np.ndarray]:
555
+ """
556
+ Factorize a numpy array to codes and uniques.
557
+
558
+ This doesn't do any coercion of types or unboxing before factorization.
559
+
560
+ Parameters
561
+ ----------
562
+ values : ndarray
563
+ use_na_sentinel : bool, default True
564
+ If True, the sentinel -1 will be used for NaN values. If False,
565
+ NaN values will be encoded as non-negative integers and will not drop the
566
+ NaN from the uniques of the values.
567
+ size_hint : int, optional
568
+ Passed through to the hashtable's 'get_labels' method
569
+ na_value : object, optional
570
+ A value in `values` to consider missing. Note: only use this
571
+ parameter when you know that you don't have any values pandas would
572
+ consider missing in the array (NaN for float data, iNaT for
573
+ datetimes, etc.).
574
+ mask : ndarray[bool], optional
575
+ If not None, the mask is used as indicator for missing values
576
+ (True = missing, False = valid) instead of `na_value` or
577
+ condition "val != val".
578
+
579
+ Returns
580
+ -------
581
+ codes : ndarray[np.intp]
582
+ uniques : ndarray
583
+ """
584
+ original = values
585
+ if values.dtype.kind in "mM":
586
+ # _get_hashtable_algo will cast dt64/td64 to i8 via _ensure_data, so we
587
+ # need to do the same to na_value. We are assuming here that the passed
588
+ # na_value is an appropriately-typed NaT.
589
+ # e.g. test_where_datetimelike_categorical
590
+ na_value = iNaT
591
+
592
+ hash_klass, values = _get_hashtable_algo(values)
593
+
594
+ table = hash_klass(size_hint or len(values))
595
+ uniques, codes = table.factorize(
596
+ values,
597
+ na_sentinel=-1,
598
+ na_value=na_value,
599
+ mask=mask,
600
+ ignore_na=use_na_sentinel,
601
+ )
602
+
603
+ # re-cast e.g. i8->dt64/td64, uint8->bool
604
+ uniques = _reconstruct_data(uniques, original.dtype, original)
605
+
606
+ codes = ensure_platform_int(codes)
607
+ return codes, uniques
608
+
609
+
610
+ @doc(
611
+ values=dedent(
612
+ """\
613
+ values : sequence
614
+ A 1-D sequence. Sequences that aren't pandas objects are
615
+ coerced to ndarrays before factorization.
616
+ """
617
+ ),
618
+ sort=dedent(
619
+ """\
620
+ sort : bool, default False
621
+ Sort `uniques` and shuffle `codes` to maintain the
622
+ relationship.
623
+ """
624
+ ),
625
+ size_hint=dedent(
626
+ """\
627
+ size_hint : int, optional
628
+ Hint to the hashtable sizer.
629
+ """
630
+ ),
631
+ )
632
+ def factorize(
633
+ values,
634
+ sort: bool = False,
635
+ use_na_sentinel: bool = True,
636
+ size_hint: int | None = None,
637
+ ) -> tuple[np.ndarray, np.ndarray | Index]:
638
+ """
639
+ Encode the object as an enumerated type or categorical variable.
640
+
641
+ This method is useful for obtaining a numeric representation of an
642
+ array when all that matters is identifying distinct values. `factorize`
643
+ is available as both a top-level function :func:`pandas.factorize`,
644
+ and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.
645
+
646
+ Parameters
647
+ ----------
648
+ {values}{sort}
649
+ use_na_sentinel : bool, default True
650
+ If True, the sentinel -1 will be used for NaN values. If False,
651
+ NaN values will be encoded as non-negative integers and will not drop the
652
+ NaN from the uniques of the values.
653
+
654
+ .. versionadded:: 1.5.0
655
+ {size_hint}\
656
+
657
+ Returns
658
+ -------
659
+ codes : ndarray
660
+ An integer ndarray that's an indexer into `uniques`.
661
+ ``uniques.take(codes)`` will have the same values as `values`.
662
+ uniques : ndarray, Index, or Categorical
663
+ The unique valid values. When `values` is Categorical, `uniques`
664
+ is a Categorical. When `values` is some other pandas object, an
665
+ `Index` is returned. Otherwise, a 1-D ndarray is returned.
666
+
667
+ .. note::
668
+
669
+ Even if there's a missing value in `values`, `uniques` will
670
+ *not* contain an entry for it.
671
+
672
+ See Also
673
+ --------
674
+ cut : Discretize continuous-valued array.
675
+ unique : Find the unique value in an array.
676
+
677
+ Notes
678
+ -----
679
+ Reference :ref:`the user guide <reshaping.factorize>` for more examples.
680
+
681
+ Examples
682
+ --------
683
+ These examples all show factorize as a top-level method like
684
+ ``pd.factorize(values)``. The results are identical for methods like
685
+ :meth:`Series.factorize`.
686
+
687
+ >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"))
688
+ >>> codes
689
+ array([0, 0, 1, 2, 0])
690
+ >>> uniques
691
+ array(['b', 'a', 'c'], dtype=object)
692
+
693
+ With ``sort=True``, the `uniques` will be sorted, and `codes` will be
694
+ shuffled so that the relationship is the maintained.
695
+
696
+ >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"),
697
+ ... sort=True)
698
+ >>> codes
699
+ array([1, 1, 0, 2, 1])
700
+ >>> uniques
701
+ array(['a', 'b', 'c'], dtype=object)
702
+
703
+ When ``use_na_sentinel=True`` (the default), missing values are indicated in
704
+ the `codes` with the sentinel value ``-1`` and missing values are not
705
+ included in `uniques`.
706
+
707
+ >>> codes, uniques = pd.factorize(np.array(['b', None, 'a', 'c', 'b'], dtype="O"))
708
+ >>> codes
709
+ array([ 0, -1, 1, 2, 0])
710
+ >>> uniques
711
+ array(['b', 'a', 'c'], dtype=object)
712
+
713
+ Thus far, we've only factorized lists (which are internally coerced to
714
+ NumPy arrays). When factorizing pandas objects, the type of `uniques`
715
+ will differ. For Categoricals, a `Categorical` is returned.
716
+
717
+ >>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
718
+ >>> codes, uniques = pd.factorize(cat)
719
+ >>> codes
720
+ array([0, 0, 1])
721
+ >>> uniques
722
+ ['a', 'c']
723
+ Categories (3, object): ['a', 'b', 'c']
724
+
725
+ Notice that ``'b'`` is in ``uniques.categories``, despite not being
726
+ present in ``cat.values``.
727
+
728
+ For all other pandas objects, an Index of the appropriate type is
729
+ returned.
730
+
731
+ >>> cat = pd.Series(['a', 'a', 'c'])
732
+ >>> codes, uniques = pd.factorize(cat)
733
+ >>> codes
734
+ array([0, 0, 1])
735
+ >>> uniques
736
+ Index(['a', 'c'], dtype='object')
737
+
738
+ If NaN is in the values, and we want to include NaN in the uniques of the
739
+ values, it can be achieved by setting ``use_na_sentinel=False``.
740
+
741
+ >>> values = np.array([1, 2, 1, np.nan])
742
+ >>> codes, uniques = pd.factorize(values) # default: use_na_sentinel=True
743
+ >>> codes
744
+ array([ 0, 1, 0, -1])
745
+ >>> uniques
746
+ array([1., 2.])
747
+
748
+ >>> codes, uniques = pd.factorize(values, use_na_sentinel=False)
749
+ >>> codes
750
+ array([0, 1, 0, 2])
751
+ >>> uniques
752
+ array([ 1., 2., nan])
753
+ """
754
+ # Implementation notes: This method is responsible for 3 things
755
+ # 1.) coercing data to array-like (ndarray, Index, extension array)
756
+ # 2.) factorizing codes and uniques
757
+ # 3.) Maybe boxing the uniques in an Index
758
+ #
759
+ # Step 2 is dispatched to extension types (like Categorical). They are
760
+ # responsible only for factorization. All data coercion, sorting and boxing
761
+ # should happen here.
762
+ if isinstance(values, (ABCIndex, ABCSeries)):
763
+ return values.factorize(sort=sort, use_na_sentinel=use_na_sentinel)
764
+
765
+ values = _ensure_arraylike(values, func_name="factorize")
766
+ original = values
767
+
768
+ if (
769
+ isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
770
+ and values.freq is not None
771
+ ):
772
+ # The presence of 'freq' means we can fast-path sorting and know there
773
+ # aren't NAs
774
+ codes, uniques = values.factorize(sort=sort)
775
+ return codes, uniques
776
+
777
+ elif not isinstance(values, np.ndarray):
778
+ # i.e. ExtensionArray
779
+ codes, uniques = values.factorize(use_na_sentinel=use_na_sentinel)
780
+
781
+ else:
782
+ values = np.asarray(values) # convert DTA/TDA/MultiIndex
783
+
784
+ if not use_na_sentinel and values.dtype == object:
785
+ # factorize can now handle differentiating various types of null values.
786
+ # These can only occur when the array has object dtype.
787
+ # However, for backwards compatibility we only use the null for the
788
+ # provided dtype. This may be revisited in the future, see GH#48476.
789
+ null_mask = isna(values)
790
+ if null_mask.any():
791
+ na_value = na_value_for_dtype(values.dtype, compat=False)
792
+ # Don't modify (potentially user-provided) array
793
+ values = np.where(null_mask, na_value, values)
794
+
795
+ codes, uniques = factorize_array(
796
+ values,
797
+ use_na_sentinel=use_na_sentinel,
798
+ size_hint=size_hint,
799
+ )
800
+
801
+ if sort and len(uniques) > 0:
802
+ uniques, codes = safe_sort(
803
+ uniques,
804
+ codes,
805
+ use_na_sentinel=use_na_sentinel,
806
+ assume_unique=True,
807
+ verify=False,
808
+ )
809
+
810
+ uniques = _reconstruct_data(uniques, original.dtype, original)
811
+
812
+ return codes, uniques
813
+
814
+
815
+ def value_counts(
816
+ values,
817
+ sort: bool = True,
818
+ ascending: bool = False,
819
+ normalize: bool = False,
820
+ bins=None,
821
+ dropna: bool = True,
822
+ ) -> Series:
823
+ """
824
+ Compute a histogram of the counts of non-null values.
825
+
826
+ Parameters
827
+ ----------
828
+ values : ndarray (1-d)
829
+ sort : bool, default True
830
+ Sort by values
831
+ ascending : bool, default False
832
+ Sort in ascending order
833
+ normalize: bool, default False
834
+ If True then compute a relative histogram
835
+ bins : integer, optional
836
+ Rather than count values, group them into half-open bins,
837
+ convenience for pd.cut, only works with numeric data
838
+ dropna : bool, default True
839
+ Don't include counts of NaN
840
+
841
+ Returns
842
+ -------
843
+ Series
844
+ """
845
+ warnings.warn(
846
+ # GH#53493
847
+ "pandas.value_counts is deprecated and will be removed in a "
848
+ "future version. Use pd.Series(obj).value_counts() instead.",
849
+ FutureWarning,
850
+ stacklevel=find_stack_level(),
851
+ )
852
+ return value_counts_internal(
853
+ values,
854
+ sort=sort,
855
+ ascending=ascending,
856
+ normalize=normalize,
857
+ bins=bins,
858
+ dropna=dropna,
859
+ )
860
+
861
+
862
+ def value_counts_internal(
863
+ values,
864
+ sort: bool = True,
865
+ ascending: bool = False,
866
+ normalize: bool = False,
867
+ bins=None,
868
+ dropna: bool = True,
869
+ ) -> Series:
870
+ from pandas import (
871
+ Index,
872
+ Series,
873
+ )
874
+
875
+ index_name = getattr(values, "name", None)
876
+ name = "proportion" if normalize else "count"
877
+
878
+ if bins is not None:
879
+ from pandas.core.reshape.tile import cut
880
+
881
+ if isinstance(values, Series):
882
+ values = values._values
883
+
884
+ try:
885
+ ii = cut(values, bins, include_lowest=True)
886
+ except TypeError as err:
887
+ raise TypeError("bins argument only works with numeric data.") from err
888
+
889
+ # count, remove nulls (from the index), and but the bins
890
+ result = ii.value_counts(dropna=dropna)
891
+ result.name = name
892
+ result = result[result.index.notna()]
893
+ result.index = result.index.astype("interval")
894
+ result = result.sort_index()
895
+
896
+ # if we are dropna and we have NO values
897
+ if dropna and (result._values == 0).all():
898
+ result = result.iloc[0:0]
899
+
900
+ # normalizing is by len of all (regardless of dropna)
901
+ counts = np.array([len(ii)])
902
+
903
+ else:
904
+ if is_extension_array_dtype(values):
905
+ # handle Categorical and sparse,
906
+ result = Series(values, copy=False)._values.value_counts(dropna=dropna)
907
+ result.name = name
908
+ result.index.name = index_name
909
+ counts = result._values
910
+ if not isinstance(counts, np.ndarray):
911
+ # e.g. ArrowExtensionArray
912
+ counts = np.asarray(counts)
913
+
914
+ elif isinstance(values, ABCMultiIndex):
915
+ # GH49558
916
+ levels = list(range(values.nlevels))
917
+ result = (
918
+ Series(index=values, name=name)
919
+ .groupby(level=levels, dropna=dropna)
920
+ .size()
921
+ )
922
+ result.index.names = values.names
923
+ counts = result._values
924
+
925
+ else:
926
+ values = _ensure_arraylike(values, func_name="value_counts")
927
+ keys, counts, _ = value_counts_arraylike(values, dropna)
928
+ if keys.dtype == np.float16:
929
+ keys = keys.astype(np.float32)
930
+
931
+ # For backwards compatibility, we let Index do its normal type
932
+ # inference, _except_ for if if infers from object to bool.
933
+ idx = Index(keys)
934
+ if idx.dtype == bool and keys.dtype == object:
935
+ idx = idx.astype(object)
936
+ elif (
937
+ idx.dtype != keys.dtype # noqa: PLR1714 # # pylint: disable=R1714
938
+ and idx.dtype != "string[pyarrow_numpy]"
939
+ ):
940
+ warnings.warn(
941
+ # GH#56161
942
+ "The behavior of value_counts with object-dtype is deprecated. "
943
+ "In a future version, this will *not* perform dtype inference "
944
+ "on the resulting index. To retain the old behavior, use "
945
+ "`result.index = result.index.infer_objects()`",
946
+ FutureWarning,
947
+ stacklevel=find_stack_level(),
948
+ )
949
+ idx.name = index_name
950
+
951
+ result = Series(counts, index=idx, name=name, copy=False)
952
+
953
+ if sort:
954
+ result = result.sort_values(ascending=ascending)
955
+
956
+ if normalize:
957
+ result = result / counts.sum()
958
+
959
+ return result
960
+
961
+
962
+ # Called once from SparseArray, otherwise could be private
963
+ def value_counts_arraylike(
964
+ values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
965
+ ) -> tuple[ArrayLike, npt.NDArray[np.int64], int]:
966
+ """
967
+ Parameters
968
+ ----------
969
+ values : np.ndarray
970
+ dropna : bool
971
+ mask : np.ndarray[bool] or None, default None
972
+
973
+ Returns
974
+ -------
975
+ uniques : np.ndarray
976
+ counts : np.ndarray[np.int64]
977
+ """
978
+ original = values
979
+ values = _ensure_data(values)
980
+
981
+ keys, counts, na_counter = htable.value_count(values, dropna, mask=mask)
982
+
983
+ if needs_i8_conversion(original.dtype):
984
+ # datetime, timedelta, or period
985
+
986
+ if dropna:
987
+ mask = keys != iNaT
988
+ keys, counts = keys[mask], counts[mask]
989
+
990
+ res_keys = _reconstruct_data(keys, original.dtype, original)
991
+ return res_keys, counts, na_counter
992
+
993
+
994
+ def duplicated(
995
+ values: ArrayLike,
996
+ keep: Literal["first", "last", False] = "first",
997
+ mask: npt.NDArray[np.bool_] | None = None,
998
+ ) -> npt.NDArray[np.bool_]:
999
+ """
1000
+ Return boolean ndarray denoting duplicate values.
1001
+
1002
+ Parameters
1003
+ ----------
1004
+ values : np.ndarray or ExtensionArray
1005
+ Array over which to check for duplicate values.
1006
+ keep : {'first', 'last', False}, default 'first'
1007
+ - ``first`` : Mark duplicates as ``True`` except for the first
1008
+ occurrence.
1009
+ - ``last`` : Mark duplicates as ``True`` except for the last
1010
+ occurrence.
1011
+ - False : Mark all duplicates as ``True``.
1012
+ mask : ndarray[bool], optional
1013
+ array indicating which elements to exclude from checking
1014
+
1015
+ Returns
1016
+ -------
1017
+ duplicated : ndarray[bool]
1018
+ """
1019
+ values = _ensure_data(values)
1020
+ return htable.duplicated(values, keep=keep, mask=mask)
1021
+
1022
+
1023
+ def mode(
1024
+ values: ArrayLike, dropna: bool = True, mask: npt.NDArray[np.bool_] | None = None
1025
+ ) -> ArrayLike:
1026
+ """
1027
+ Returns the mode(s) of an array.
1028
+
1029
+ Parameters
1030
+ ----------
1031
+ values : array-like
1032
+ Array over which to check for duplicate values.
1033
+ dropna : bool, default True
1034
+ Don't consider counts of NaN/NaT.
1035
+
1036
+ Returns
1037
+ -------
1038
+ np.ndarray or ExtensionArray
1039
+ """
1040
+ values = _ensure_arraylike(values, func_name="mode")
1041
+ original = values
1042
+
1043
+ if needs_i8_conversion(values.dtype):
1044
+ # Got here with ndarray; dispatch to DatetimeArray/TimedeltaArray.
1045
+ values = ensure_wrapped_if_datetimelike(values)
1046
+ values = cast("ExtensionArray", values)
1047
+ return values._mode(dropna=dropna)
1048
+
1049
+ values = _ensure_data(values)
1050
+
1051
+ npresult, res_mask = htable.mode(values, dropna=dropna, mask=mask)
1052
+ if res_mask is not None:
1053
+ return npresult, res_mask # type: ignore[return-value]
1054
+
1055
+ try:
1056
+ npresult = np.sort(npresult)
1057
+ except TypeError as err:
1058
+ warnings.warn(
1059
+ f"Unable to sort modes: {err}",
1060
+ stacklevel=find_stack_level(),
1061
+ )
1062
+
1063
+ result = _reconstruct_data(npresult, original.dtype, original)
1064
+ return result
1065
+
1066
+
1067
+ def rank(
1068
+ values: ArrayLike,
1069
+ axis: AxisInt = 0,
1070
+ method: str = "average",
1071
+ na_option: str = "keep",
1072
+ ascending: bool = True,
1073
+ pct: bool = False,
1074
+ ) -> npt.NDArray[np.float64]:
1075
+ """
1076
+ Rank the values along a given axis.
1077
+
1078
+ Parameters
1079
+ ----------
1080
+ values : np.ndarray or ExtensionArray
1081
+ Array whose values will be ranked. The number of dimensions in this
1082
+ array must not exceed 2.
1083
+ axis : int, default 0
1084
+ Axis over which to perform rankings.
1085
+ method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
1086
+ The method by which tiebreaks are broken during the ranking.
1087
+ na_option : {'keep', 'top'}, default 'keep'
1088
+ The method by which NaNs are placed in the ranking.
1089
+ - ``keep``: rank each NaN value with a NaN ranking
1090
+ - ``top``: replace each NaN with either +/- inf so that they
1091
+ there are ranked at the top
1092
+ ascending : bool, default True
1093
+ Whether or not the elements should be ranked in ascending order.
1094
+ pct : bool, default False
1095
+ Whether or not to the display the returned rankings in integer form
1096
+ (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
1097
+ """
1098
+ is_datetimelike = needs_i8_conversion(values.dtype)
1099
+ values = _ensure_data(values)
1100
+
1101
+ if values.ndim == 1:
1102
+ ranks = algos.rank_1d(
1103
+ values,
1104
+ is_datetimelike=is_datetimelike,
1105
+ ties_method=method,
1106
+ ascending=ascending,
1107
+ na_option=na_option,
1108
+ pct=pct,
1109
+ )
1110
+ elif values.ndim == 2:
1111
+ ranks = algos.rank_2d(
1112
+ values,
1113
+ axis=axis,
1114
+ is_datetimelike=is_datetimelike,
1115
+ ties_method=method,
1116
+ ascending=ascending,
1117
+ na_option=na_option,
1118
+ pct=pct,
1119
+ )
1120
+ else:
1121
+ raise TypeError("Array with ndim > 2 are not supported.")
1122
+
1123
+ return ranks
1124
+
1125
+
1126
+ # ---- #
1127
+ # take #
1128
+ # ---- #
1129
+
1130
+
1131
+ def take(
1132
+ arr,
1133
+ indices: TakeIndexer,
1134
+ axis: AxisInt = 0,
1135
+ allow_fill: bool = False,
1136
+ fill_value=None,
1137
+ ):
1138
+ """
1139
+ Take elements from an array.
1140
+
1141
+ Parameters
1142
+ ----------
1143
+ arr : array-like or scalar value
1144
+ Non array-likes (sequences/scalars without a dtype) are coerced
1145
+ to an ndarray.
1146
+
1147
+ .. deprecated:: 2.1.0
1148
+ Passing an argument other than a numpy.ndarray, ExtensionArray,
1149
+ Index, or Series is deprecated.
1150
+
1151
+ indices : sequence of int or one-dimensional np.ndarray of int
1152
+ Indices to be taken.
1153
+ axis : int, default 0
1154
+ The axis over which to select values.
1155
+ allow_fill : bool, default False
1156
+ How to handle negative values in `indices`.
1157
+
1158
+ * False: negative values in `indices` indicate positional indices
1159
+ from the right (the default). This is similar to :func:`numpy.take`.
1160
+
1161
+ * True: negative values in `indices` indicate
1162
+ missing values. These values are set to `fill_value`. Any other
1163
+ negative values raise a ``ValueError``.
1164
+
1165
+ fill_value : any, optional
1166
+ Fill value to use for NA-indices when `allow_fill` is True.
1167
+ This may be ``None``, in which case the default NA value for
1168
+ the type (``self.dtype.na_value``) is used.
1169
+
1170
+ For multi-dimensional `arr`, each *element* is filled with
1171
+ `fill_value`.
1172
+
1173
+ Returns
1174
+ -------
1175
+ ndarray or ExtensionArray
1176
+ Same type as the input.
1177
+
1178
+ Raises
1179
+ ------
1180
+ IndexError
1181
+ When `indices` is out of bounds for the array.
1182
+ ValueError
1183
+ When the indexer contains negative values other than ``-1``
1184
+ and `allow_fill` is True.
1185
+
1186
+ Notes
1187
+ -----
1188
+ When `allow_fill` is False, `indices` may be whatever dimensionality
1189
+ is accepted by NumPy for `arr`.
1190
+
1191
+ When `allow_fill` is True, `indices` should be 1-D.
1192
+
1193
+ See Also
1194
+ --------
1195
+ numpy.take : Take elements from an array along an axis.
1196
+
1197
+ Examples
1198
+ --------
1199
+ >>> import pandas as pd
1200
+
1201
+ With the default ``allow_fill=False``, negative numbers indicate
1202
+ positional indices from the right.
1203
+
1204
+ >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1])
1205
+ array([10, 10, 30])
1206
+
1207
+ Setting ``allow_fill=True`` will place `fill_value` in those positions.
1208
+
1209
+ >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
1210
+ array([10., 10., nan])
1211
+
1212
+ >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
1213
+ ... fill_value=-10)
1214
+ array([ 10, 10, -10])
1215
+ """
1216
+ if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)):
1217
+ # GH#52981
1218
+ warnings.warn(
1219
+ "pd.api.extensions.take accepting non-standard inputs is deprecated "
1220
+ "and will raise in a future version. Pass either a numpy.ndarray, "
1221
+ "ExtensionArray, Index, or Series instead.",
1222
+ FutureWarning,
1223
+ stacklevel=find_stack_level(),
1224
+ )
1225
+
1226
+ if not is_array_like(arr):
1227
+ arr = np.asarray(arr)
1228
+
1229
+ indices = ensure_platform_int(indices)
1230
+
1231
+ if allow_fill:
1232
+ # Pandas style, -1 means NA
1233
+ validate_indices(indices, arr.shape[axis])
1234
+ result = take_nd(
1235
+ arr, indices, axis=axis, allow_fill=True, fill_value=fill_value
1236
+ )
1237
+ else:
1238
+ # NumPy style
1239
+ result = arr.take(indices, axis=axis)
1240
+ return result
1241
+
1242
+
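The docstring examples above use plain ndarrays; with an ExtensionArray input and ``allow_fill=True``, leaving ``fill_value=None`` falls back to the dtype's own NA value. A minimal, hedged sketch (output shown approximately):

>>> import pandas as pd
>>> arr = pd.array([10, 20, 30], dtype="Int64")
>>> pd.api.extensions.take(arr, [0, -1], allow_fill=True)
<IntegerArray>
[10, <NA>]
Length: 2, dtype: Int64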
1243
+ # ------------ #
1244
+ # searchsorted #
1245
+ # ------------ #
1246
+
1247
+
1248
+ def searchsorted(
1249
+ arr: ArrayLike,
1250
+ value: NumpyValueArrayLike | ExtensionArray,
1251
+ side: Literal["left", "right"] = "left",
1252
+ sorter: NumpySorter | None = None,
1253
+ ) -> npt.NDArray[np.intp] | np.intp:
1254
+ """
1255
+ Find indices where elements should be inserted to maintain order.
1256
+
1257
+ Find the indices into a sorted array `arr` (a) such that, if the
1258
+ corresponding elements in `value` were inserted before the indices,
1259
+ the order of `arr` would be preserved.
1260
+
1261
+ Assuming that `arr` is sorted:
1262
+
1263
+ ====== ================================
1264
+ `side` returned index `i` satisfies
1265
+ ====== ================================
1266
+ left ``arr[i-1] < value <= arr[i]``
1267
+ right ``arr[i-1] <= value < arr[i]``
1268
+ ====== ================================
1269
+
1270
+ Parameters
1271
+ ----------
1272
+ arr : np.ndarray, ExtensionArray, Series
1273
+ Input array. If `sorter` is None, then it must be sorted in
1274
+ ascending order, otherwise `sorter` must be an array of indices
1275
+ that sort it.
1276
+ value : array-like or scalar
1277
+ Values to insert into `arr`.
1278
+ side : {'left', 'right'}, optional
1279
+ If 'left', the index of the first suitable location found is given.
1280
+ If 'right', return the last such index. If there is no suitable
1281
+ index, return either 0 or N (where N is the length of `arr`).
1282
+ sorter : 1-D array-like, optional
1283
+ Optional array of integer indices that sort array a into ascending
1284
+ order. They are typically the result of argsort.
1285
+
1286
+ Returns
1287
+ -------
1288
+ array of ints or int
1289
+ If value is array-like, array of insertion points.
1290
+ If value is scalar, a single integer.
1291
+
1292
+ See Also
1293
+ --------
1294
+ numpy.searchsorted : Similar method from NumPy.
1295
+ """
1296
+ if sorter is not None:
1297
+ sorter = ensure_platform_int(sorter)
1298
+
1299
+ if (
1300
+ isinstance(arr, np.ndarray)
1301
+ and arr.dtype.kind in "iu"
1302
+ and (is_integer(value) or is_integer_dtype(value))
1303
+ ):
1304
+ # if `arr` and `value` have different dtypes, `arr` would be
1305
+ # recast by numpy, causing a slow search.
1306
+ # Before searching below, we therefore try to give `value` the
1307
+ # same dtype as `arr`, while guarding against integer overflows.
1308
+ iinfo = np.iinfo(arr.dtype.type)
1309
+ value_arr = np.array([value]) if is_integer(value) else np.array(value)
1310
+ if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():
1311
+ # value within bounds, so no overflow, so can convert value dtype
1312
+ # to dtype of arr
1313
+ dtype = arr.dtype
1314
+ else:
1315
+ dtype = value_arr.dtype
1316
+
1317
+ if is_integer(value):
1318
+ # We know that value is int
1319
+ value = cast(int, dtype.type(value))
1320
+ else:
1321
+ value = pd_array(cast(ArrayLike, value), dtype=dtype)
1322
+ else:
1323
+ # E.g. if `arr` is an array with dtype='datetime64[ns]'
1324
+ # and `value` is a pd.Timestamp, we may need to convert value
1325
+ arr = ensure_wrapped_if_datetimelike(arr)
1326
+
1327
+ # Argument 1 to "searchsorted" of "ndarray" has incompatible type
1328
+ # "Union[NumpyValueArrayLike, ExtensionArray]"; expected "NumpyValueArrayLike"
1329
+ return arr.searchsorted(value, side=side, sorter=sorter) # type: ignore[arg-type]
1330
+
1331
+
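The integer fast path above casts ``value`` to the dtype of ``arr`` (when it fits without overflow) so NumPy does not upcast the whole array before searching. A minimal sketch calling this internal helper directly (output shown approximately):

>>> import numpy as np
>>> from pandas.core.algorithms import searchsorted
>>> searchsorted(np.array([1, 2, 3], dtype=np.int8), 2)
1
>>> searchsorted(np.array([1, 2, 3], dtype=np.int8), 2, side="right")
2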
1332
+ # ---- #
1333
+ # diff #
1334
+ # ---- #
1335
+
1336
+ _diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}
1337
+
1338
+
1339
+ def diff(arr, n: int, axis: AxisInt = 0):
1340
+ """
1341
+ Difference of n periods on arr,
1342
+ analogous to s - s.shift(n).
1343
+
1344
+ Parameters
1345
+ ----------
1346
+ arr : ndarray or ExtensionArray
1347
+ n : int
1348
+ number of periods
1349
+ axis : {0, 1}
1350
+ axis to shift on
1353
+
1354
+ Returns
1355
+ -------
1356
+ shifted
1357
+ """
1358
+
1359
+ n = int(n)
1360
+ na = np.nan
1361
+ dtype = arr.dtype
1362
+
1363
+ is_bool = is_bool_dtype(dtype)
1364
+ if is_bool:
1365
+ op = operator.xor
1366
+ else:
1367
+ op = operator.sub
1368
+
1369
+ if isinstance(dtype, NumpyEADtype):
1370
+ # NumpyExtensionArray cannot necessarily hold shifted versions of itself.
1371
+ arr = arr.to_numpy()
1372
+ dtype = arr.dtype
1373
+
1374
+ if not isinstance(arr, np.ndarray):
1375
+ # i.e. ExtensionArray
1376
+ if hasattr(arr, f"__{op.__name__}__"):
1377
+ if axis != 0:
1378
+ raise ValueError(f"cannot diff {type(arr).__name__} on axis={axis}")
1379
+ return op(arr, arr.shift(n))
1380
+ else:
1381
+ raise TypeError(
1382
+ f"{type(arr).__name__} has no 'diff' method. "
1383
+ "Convert to a suitable dtype prior to calling 'diff'."
1384
+ )
1385
+
1386
+ is_timedelta = False
1387
+ if arr.dtype.kind in "mM":
1388
+ dtype = np.int64
1389
+ arr = arr.view("i8")
1390
+ na = iNaT
1391
+ is_timedelta = True
1392
+
1393
+ elif is_bool:
1394
+ # We have to cast in order to be able to hold np.nan
1395
+ dtype = np.object_
1396
+
1397
+ elif dtype.kind in "iu":
1398
+ # We have to cast in order to be able to hold np.nan
1399
+
1400
+ # int8, int16 are incompatible with float64,
1401
+ # see https://github.com/cython/cython/issues/2646
1402
+ if arr.dtype.name in ["int8", "int16"]:
1403
+ dtype = np.float32
1404
+ else:
1405
+ dtype = np.float64
1406
+
1407
+ orig_ndim = arr.ndim
1408
+ if orig_ndim == 1:
1409
+ # reshape so we can always use algos.diff_2d
1410
+ arr = arr.reshape(-1, 1)
1411
+ # TODO: require axis == 0
1412
+
1413
+ dtype = np.dtype(dtype)
1414
+ out_arr = np.empty(arr.shape, dtype=dtype)
1415
+
1416
+ na_indexer = [slice(None)] * 2
1417
+ na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
1418
+ out_arr[tuple(na_indexer)] = na
1419
+
1420
+ if arr.dtype.name in _diff_special:
1421
+ # TODO: can diff_2d dtype specialization troubles be fixed by defining
1422
+ # out_arr inside diff_2d?
1423
+ algos.diff_2d(arr, out_arr, n, axis, datetimelike=is_timedelta)
1424
+ else:
1425
+ # To keep mypy happy, _res_indexer is a list while res_indexer is
1426
+ # a tuple, ditto for lag_indexer.
1427
+ _res_indexer = [slice(None)] * 2
1428
+ _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
1429
+ res_indexer = tuple(_res_indexer)
1430
+
1431
+ _lag_indexer = [slice(None)] * 2
1432
+ _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
1433
+ lag_indexer = tuple(_lag_indexer)
1434
+
1435
+ out_arr[res_indexer] = op(arr[res_indexer], arr[lag_indexer])
1436
+
1437
+ if is_timedelta:
1438
+ out_arr = out_arr.view("timedelta64[ns]")
1439
+
1440
+ if orig_ndim == 1:
1441
+ out_arr = out_arr[:, 0]
1442
+ return out_arr
1443
+
1444
+
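The public ``Series.diff`` funnels into this helper, and the ``s - s.shift(n)`` equivalence mentioned in the docstring can be checked directly. Outputs shown approximately:

>>> import pandas as pd
>>> s = pd.Series([1, 3, 6, 10])
>>> s.diff(2)
0    NaN
1    NaN
2    5.0
3    7.0
dtype: float64
>>> s.diff(2).equals(s - s.shift(2))
True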
1445
+ # --------------------------------------------------------------------
1446
+ # Helper functions
1447
+
1448
+
1449
+ # Note: safe_sort is in algorithms.py instead of sorting.py because it is
1450
+ # low-dependency, is used in this module, and uses private methods from
1451
+ # this module.
1452
+ def safe_sort(
1453
+ values: Index | ArrayLike,
1454
+ codes: npt.NDArray[np.intp] | None = None,
1455
+ use_na_sentinel: bool = True,
1456
+ assume_unique: bool = False,
1457
+ verify: bool = True,
1458
+ ) -> AnyArrayLike | tuple[AnyArrayLike, np.ndarray]:
1459
+ """
1460
+ Sort ``values`` and reorder corresponding ``codes``.
1461
+
1462
+ ``values`` should be unique if ``codes`` is not None.
1463
+ Safe for use with mixed types (int, str), orders ints before strs.
1464
+
1465
+ Parameters
1466
+ ----------
1467
+ values : list-like
1468
+ Sequence; must be unique if ``codes`` is not None.
1469
+ codes : np.ndarray[intp] or None, default None
1470
+ Indices to ``values``. All out of bound indices are treated as
1471
+ "not found" and will be masked with ``-1``.
1472
+ use_na_sentinel : bool, default True
1473
+ If True, the sentinel -1 will be used for NaN values. If False,
1474
+ NaN values will be encoded as non-negative integers and will not drop the
1475
+ NaN from the uniques of the values.
1476
+ assume_unique : bool, default False
1477
+ When True, ``values`` are assumed to be unique, which can speed up
1478
+ the calculation. Ignored when ``codes`` is None.
1479
+ verify : bool, default True
1480
+ Check if codes are out of bound for the values and put out of bound
1481
+ codes equal to ``-1``. If ``verify=False``, it is assumed there
1482
+ are no out of bound codes. Ignored when ``codes`` is None.
1483
+
1484
+ Returns
1485
+ -------
1486
+ ordered : AnyArrayLike
1487
+ Sorted ``values``
1488
+ new_codes : ndarray
1489
+ Reordered ``codes``; returned when ``codes`` is not None.
1490
+
1491
+ Raises
1492
+ ------
1493
+ TypeError
1494
+ * If ``values`` is not list-like or if ``codes`` is neither None
1495
+ nor list-like
1496
+ * If ``values`` cannot be sorted
1497
+ ValueError
1498
+ * If ``codes`` is not None and ``values`` contain duplicates.
1499
+ """
1500
+ if not isinstance(values, (np.ndarray, ABCExtensionArray, ABCIndex)):
1501
+ raise TypeError(
1502
+ "Only np.ndarray, ExtensionArray, and Index objects are allowed to "
1503
+ "be passed to safe_sort as values"
1504
+ )
1505
+
1506
+ sorter = None
1507
+ ordered: AnyArrayLike
1508
+
1509
+ if (
1510
+ not isinstance(values.dtype, ExtensionDtype)
1511
+ and lib.infer_dtype(values, skipna=False) == "mixed-integer"
1512
+ ):
1513
+ ordered = _sort_mixed(values)
1514
+ else:
1515
+ try:
1516
+ sorter = values.argsort()
1517
+ ordered = values.take(sorter)
1518
+ except (TypeError, decimal.InvalidOperation):
1519
+ # Previous sorters failed or were not applicable, try `_sort_mixed`
1520
+ # which would work, but which fails for special case of 1d arrays
1521
+ # with tuples.
1522
+ if values.size and isinstance(values[0], tuple):
1523
+ # error: Argument 1 to "_sort_tuples" has incompatible type
1524
+ # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected
1525
+ # "ndarray[Any, Any]"
1526
+ ordered = _sort_tuples(values) # type: ignore[arg-type]
1527
+ else:
1528
+ ordered = _sort_mixed(values)
1529
+
1530
+ # codes:
1531
+
1532
+ if codes is None:
1533
+ return ordered
1534
+
1535
+ if not is_list_like(codes):
1536
+ raise TypeError(
1537
+ "Only list-like objects or None are allowed to "
1538
+ "be passed to safe_sort as codes"
1539
+ )
1540
+ codes = ensure_platform_int(np.asarray(codes))
1541
+
1542
+ if not assume_unique and not len(unique(values)) == len(values):
1543
+ raise ValueError("values should be unique if codes is not None")
1544
+
1545
+ if sorter is None:
1546
+ # mixed types
1547
+ # error: Argument 1 to "_get_hashtable_algo" has incompatible type
1548
+ # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected
1549
+ # "ndarray[Any, Any]"
1550
+ hash_klass, values = _get_hashtable_algo(values) # type: ignore[arg-type]
1551
+ t = hash_klass(len(values))
1552
+ t.map_locations(values)
1553
+ sorter = ensure_platform_int(t.lookup(ordered))
1554
+
1555
+ if use_na_sentinel:
1556
+ # take_nd is faster, but only works for na_sentinels of -1
1557
+ order2 = sorter.argsort()
1558
+ if verify:
1559
+ mask = (codes < -len(values)) | (codes >= len(values))
1560
+ codes[mask] = 0
1561
+ else:
1562
+ mask = None
1563
+ new_codes = take_nd(order2, codes, fill_value=-1)
1564
+ else:
1565
+ reverse_indexer = np.empty(len(sorter), dtype=int)
1566
+ reverse_indexer.put(sorter, np.arange(len(sorter)))
1567
+ # Out of bound indices will be masked with `-1` next, so we
1568
+ # may deal with them here without performance loss using `mode='wrap'`
1569
+ new_codes = reverse_indexer.take(codes, mode="wrap")
1570
+
1571
+ if use_na_sentinel:
1572
+ mask = codes == -1
1573
+ if verify:
1574
+ mask = mask | (codes < -len(values)) | (codes >= len(values))
1575
+
1576
+ if use_na_sentinel and mask is not None:
1577
+ np.putmask(new_codes, mask, -1)
1578
+
1579
+ return ordered, ensure_platform_int(new_codes)
1580
+
1581
+
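A minimal sketch of what safe_sort does with ``codes``: after the values are sorted, the codes are rewritten so they keep pointing at the same underlying values (output shown approximately, using this internal helper directly):

>>> import numpy as np
>>> from pandas.core.algorithms import safe_sort
>>> values = np.array([3, 1, 2])
>>> codes = np.array([0, 1, 2], dtype=np.intp)
>>> safe_sort(values, codes)
(array([1, 2, 3]), array([2, 0, 1]))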
1582
+ def _sort_mixed(values) -> AnyArrayLike:
1583
+ """order ints before strings before nulls in 1d arrays"""
1584
+ str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
1585
+ null_pos = np.array([isna(x) for x in values], dtype=bool)
1586
+ num_pos = ~str_pos & ~null_pos
1587
+ str_argsort = np.argsort(values[str_pos])
1588
+ num_argsort = np.argsort(values[num_pos])
1589
+ # convert boolean arrays to positional indices, then order by underlying values
1590
+ str_locs = str_pos.nonzero()[0].take(str_argsort)
1591
+ num_locs = num_pos.nonzero()[0].take(num_argsort)
1592
+ null_locs = null_pos.nonzero()[0]
1593
+ locs = np.concatenate([num_locs, str_locs, null_locs])
1594
+ return values.take(locs)
1595
+
1596
+
1597
+ def _sort_tuples(values: np.ndarray) -> np.ndarray:
1598
+ """
1599
+ Convert array of tuples (1d) to array of arrays (2d).
1600
+ We need to keep the columns separately as they contain different types and
1601
+ nans (can't use `np.sort` as it may fail when str and nan are mixed in a
1602
+ column as types cannot be compared).
1603
+ """
1604
+ from pandas.core.internals.construction import to_arrays
1605
+ from pandas.core.sorting import lexsort_indexer
1606
+
1607
+ arrays, _ = to_arrays(values, None)
1608
+ indexer = lexsort_indexer(arrays, orders=True)
1609
+ return values[indexer]
1610
+
1611
+
1612
+ def union_with_duplicates(
1613
+ lvals: ArrayLike | Index, rvals: ArrayLike | Index
1614
+ ) -> ArrayLike | Index:
1615
+ """
1616
+ Extracts the union from lvals and rvals with respect to duplicates and nans in
1617
+ both arrays.
1618
+
1619
+ Parameters
1620
+ ----------
1621
+ lvals : np.ndarray or ExtensionArray
1622
+ Left values, which are ordered in front.
1623
+ rvals : np.ndarray or ExtensionArray
1624
+ Right values, ordered after lvals.
1625
+
1626
+ Returns
1627
+ -------
1628
+ np.ndarray or ExtensionArray
1629
+ Containing the unsorted union of both arrays.
1630
+
1631
+ Notes
1632
+ -----
1633
+ Caller is responsible for ensuring lvals.dtype == rvals.dtype.
1634
+ """
1635
+ from pandas import Series
1636
+
1637
+ with warnings.catch_warnings():
1638
+ # filter warning from object dtype inference; we will end up discarding
1639
+ # the index here, so the deprecation does not affect the end result here.
1640
+ warnings.filterwarnings(
1641
+ "ignore",
1642
+ "The behavior of value_counts with object-dtype is deprecated",
1643
+ category=FutureWarning,
1644
+ )
1645
+ l_count = value_counts_internal(lvals, dropna=False)
1646
+ r_count = value_counts_internal(rvals, dropna=False)
1647
+ l_count, r_count = l_count.align(r_count, fill_value=0)
1648
+ final_count = np.maximum(l_count.values, r_count.values)
1649
+ final_count = Series(final_count, index=l_count.index, dtype="int", copy=False)
1650
+ if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex):
1651
+ unique_vals = lvals.append(rvals).unique()
1652
+ else:
1653
+ if isinstance(lvals, ABCIndex):
1654
+ lvals = lvals._values
1655
+ if isinstance(rvals, ABCIndex):
1656
+ rvals = rvals._values
1657
+ # error: List item 0 has incompatible type "Union[ExtensionArray,
1658
+ # ndarray[Any, Any], Index]"; expected "Union[ExtensionArray,
1659
+ # ndarray[Any, Any]]"
1660
+ combined = concat_compat([lvals, rvals]) # type: ignore[list-item]
1661
+ unique_vals = unique(combined)
1662
+ unique_vals = ensure_wrapped_if_datetimelike(unique_vals)
1663
+ repeats = final_count.reindex(unique_vals).values
1664
+ return np.repeat(unique_vals, repeats)
1665
+
1666
+
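This helper backs ``Index.union`` when either side contains duplicates: each value is kept ``max(count_left, count_right)`` times, in order of first appearance. A hedged sketch (output shown approximately):

>>> import pandas as pd
>>> pd.Index([1, 1, 2]).union(pd.Index([1, 2, 2]), sort=False)
Index([1, 1, 2, 2], dtype='int64')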
1667
+ def map_array(
1668
+ arr: ArrayLike,
1669
+ mapper,
1670
+ na_action: Literal["ignore"] | None = None,
1671
+ convert: bool = True,
1672
+ ) -> np.ndarray | ExtensionArray | Index:
1673
+ """
1674
+ Map values using an input mapping or function.
1675
+
1676
+ Parameters
1677
+ ----------
1678
+ mapper : function, dict, or Series
1679
+ Mapping correspondence.
1680
+ na_action : {None, 'ignore'}, default None
1681
+ If 'ignore', propagate NA values, without passing them to the
1682
+ mapping correspondence.
1683
+ convert : bool, default True
1684
+ Try to find better dtype for elementwise function results. If
1685
+ False, leave as dtype=object.
1686
+
1687
+ Returns
1688
+ -------
1689
+ Union[ndarray, Index, ExtensionArray]
1690
+ The output of the mapping function applied to the array.
1691
+ If the function returns a tuple with more than one element
1692
+ a MultiIndex will be returned.
1693
+ """
1694
+ if na_action not in (None, "ignore"):
1695
+ msg = f"na_action must either be 'ignore' or None, {na_action} was passed"
1696
+ raise ValueError(msg)
1697
+
1698
+ # we can fastpath dict/Series to an efficient map
1699
+ # as we know that we are not going to have to yield
1700
+ # python types
1701
+ if is_dict_like(mapper):
1702
+ if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
1703
+ # If a dictionary subclass defines a default value method,
1704
+ # convert mapper to a lookup function (GH #15999).
1705
+ dict_with_default = mapper
1706
+ mapper = lambda x: dict_with_default[
1707
+ np.nan if isinstance(x, float) and np.isnan(x) else x
1708
+ ]
1709
+ else:
1710
+ # Dictionary does not have a default. Thus it's safe to
1711
+ # convert to a Series for efficiency.
1712
+ # we specify the keys here to handle the
1713
+ # possibility that they are tuples
1714
+
1715
+ # The return value of mapping with an empty mapper is
1716
+ # expected to be pd.Series(np.nan, ...). As np.nan is
1717
+ # of dtype float64 the return value of this method should
1718
+ # be float64 as well
1719
+ from pandas import Series
1720
+
1721
+ if len(mapper) == 0:
1722
+ mapper = Series(mapper, dtype=np.float64)
1723
+ else:
1724
+ mapper = Series(mapper)
1725
+
1726
+ if isinstance(mapper, ABCSeries):
1727
+ if na_action == "ignore":
1728
+ mapper = mapper[mapper.index.notna()]
1729
+
1730
+ # Since values were input this means we came from either
1731
+ # a dict or a series and mapper should be an index
1732
+ indexer = mapper.index.get_indexer(arr)
1733
+ new_values = take_nd(mapper._values, indexer)
1734
+
1735
+ return new_values
1736
+
1737
+ if not len(arr):
1738
+ return arr.copy()
1739
+
1740
+ # we must convert to python types
1741
+ values = arr.astype(object, copy=False)
1742
+ if na_action is None:
1743
+ return lib.map_infer(values, mapper, convert=convert)
1744
+ else:
1745
+ return lib.map_infer_mask(
1746
+ values, mapper, mask=isna(values).view(np.uint8), convert=convert
1747
+ )
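``Series.map`` is a thin wrapper around map_array: dict-like mappers take the ``take_nd`` fast path above, while ``na_action='ignore'`` masks NA values before a Python function is called. Outputs shown approximately:

>>> import numpy as np
>>> import pandas as pd
>>> s = pd.Series([1, 2, np.nan])
>>> s.map({1: "a", 2: "b"})
0      a
1      b
2    NaN
dtype: object
>>> s.map(lambda x: x * 10, na_action="ignore")
0    10.0
1    20.0
2     NaN
dtype: float64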
venv/lib/python3.10/site-packages/pandas/core/api.py ADDED
@@ -0,0 +1,140 @@
1
+ from pandas._libs import (
2
+ NaT,
3
+ Period,
4
+ Timedelta,
5
+ Timestamp,
6
+ )
7
+ from pandas._libs.missing import NA
8
+
9
+ from pandas.core.dtypes.dtypes import (
10
+ ArrowDtype,
11
+ CategoricalDtype,
12
+ DatetimeTZDtype,
13
+ IntervalDtype,
14
+ PeriodDtype,
15
+ )
16
+ from pandas.core.dtypes.missing import (
17
+ isna,
18
+ isnull,
19
+ notna,
20
+ notnull,
21
+ )
22
+
23
+ from pandas.core.algorithms import (
24
+ factorize,
25
+ unique,
26
+ value_counts,
27
+ )
28
+ from pandas.core.arrays import Categorical
29
+ from pandas.core.arrays.boolean import BooleanDtype
30
+ from pandas.core.arrays.floating import (
31
+ Float32Dtype,
32
+ Float64Dtype,
33
+ )
34
+ from pandas.core.arrays.integer import (
35
+ Int8Dtype,
36
+ Int16Dtype,
37
+ Int32Dtype,
38
+ Int64Dtype,
39
+ UInt8Dtype,
40
+ UInt16Dtype,
41
+ UInt32Dtype,
42
+ UInt64Dtype,
43
+ )
44
+ from pandas.core.arrays.string_ import StringDtype
45
+ from pandas.core.construction import array
46
+ from pandas.core.flags import Flags
47
+ from pandas.core.groupby import (
48
+ Grouper,
49
+ NamedAgg,
50
+ )
51
+ from pandas.core.indexes.api import (
52
+ CategoricalIndex,
53
+ DatetimeIndex,
54
+ Index,
55
+ IntervalIndex,
56
+ MultiIndex,
57
+ PeriodIndex,
58
+ RangeIndex,
59
+ TimedeltaIndex,
60
+ )
61
+ from pandas.core.indexes.datetimes import (
62
+ bdate_range,
63
+ date_range,
64
+ )
65
+ from pandas.core.indexes.interval import (
66
+ Interval,
67
+ interval_range,
68
+ )
69
+ from pandas.core.indexes.period import period_range
70
+ from pandas.core.indexes.timedeltas import timedelta_range
71
+ from pandas.core.indexing import IndexSlice
72
+ from pandas.core.series import Series
73
+ from pandas.core.tools.datetimes import to_datetime
74
+ from pandas.core.tools.numeric import to_numeric
75
+ from pandas.core.tools.timedeltas import to_timedelta
76
+
77
+ from pandas.io.formats.format import set_eng_float_format
78
+ from pandas.tseries.offsets import DateOffset
79
+
80
+ # DataFrame needs to be imported after NamedAgg to avoid a circular import
81
+ from pandas.core.frame import DataFrame # isort:skip
82
+
83
+ __all__ = [
84
+ "array",
85
+ "ArrowDtype",
86
+ "bdate_range",
87
+ "BooleanDtype",
88
+ "Categorical",
89
+ "CategoricalDtype",
90
+ "CategoricalIndex",
91
+ "DataFrame",
92
+ "DateOffset",
93
+ "date_range",
94
+ "DatetimeIndex",
95
+ "DatetimeTZDtype",
96
+ "factorize",
97
+ "Flags",
98
+ "Float32Dtype",
99
+ "Float64Dtype",
100
+ "Grouper",
101
+ "Index",
102
+ "IndexSlice",
103
+ "Int16Dtype",
104
+ "Int32Dtype",
105
+ "Int64Dtype",
106
+ "Int8Dtype",
107
+ "Interval",
108
+ "IntervalDtype",
109
+ "IntervalIndex",
110
+ "interval_range",
111
+ "isna",
112
+ "isnull",
113
+ "MultiIndex",
114
+ "NA",
115
+ "NamedAgg",
116
+ "NaT",
117
+ "notna",
118
+ "notnull",
119
+ "Period",
120
+ "PeriodDtype",
121
+ "PeriodIndex",
122
+ "period_range",
123
+ "RangeIndex",
124
+ "Series",
125
+ "set_eng_float_format",
126
+ "StringDtype",
127
+ "Timedelta",
128
+ "TimedeltaIndex",
129
+ "timedelta_range",
130
+ "Timestamp",
131
+ "to_datetime",
132
+ "to_numeric",
133
+ "to_timedelta",
134
+ "UInt16Dtype",
135
+ "UInt32Dtype",
136
+ "UInt64Dtype",
137
+ "UInt8Dtype",
138
+ "unique",
139
+ "value_counts",
140
+ ]
venv/lib/python3.10/site-packages/pandas/core/apply.py ADDED
@@ -0,0 +1,2062 @@
1
+ from __future__ import annotations
2
+
3
+ import abc
4
+ from collections import defaultdict
5
+ import functools
6
+ from functools import partial
7
+ import inspect
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ Any,
11
+ Callable,
12
+ Literal,
13
+ cast,
14
+ )
15
+ import warnings
16
+
17
+ import numpy as np
18
+
19
+ from pandas._config import option_context
20
+
21
+ from pandas._libs import lib
22
+ from pandas._libs.internals import BlockValuesRefs
23
+ from pandas._typing import (
24
+ AggFuncType,
25
+ AggFuncTypeBase,
26
+ AggFuncTypeDict,
27
+ AggObjType,
28
+ Axis,
29
+ AxisInt,
30
+ NDFrameT,
31
+ npt,
32
+ )
33
+ from pandas.compat._optional import import_optional_dependency
34
+ from pandas.errors import SpecificationError
35
+ from pandas.util._decorators import cache_readonly
36
+ from pandas.util._exceptions import find_stack_level
37
+
38
+ from pandas.core.dtypes.cast import is_nested_object
39
+ from pandas.core.dtypes.common import (
40
+ is_dict_like,
41
+ is_extension_array_dtype,
42
+ is_list_like,
43
+ is_numeric_dtype,
44
+ is_sequence,
45
+ )
46
+ from pandas.core.dtypes.dtypes import (
47
+ CategoricalDtype,
48
+ ExtensionDtype,
49
+ )
50
+ from pandas.core.dtypes.generic import (
51
+ ABCDataFrame,
52
+ ABCNDFrame,
53
+ ABCSeries,
54
+ )
55
+
56
+ from pandas.core._numba.executor import generate_apply_looper
57
+ import pandas.core.common as com
58
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
59
+
60
+ if TYPE_CHECKING:
61
+ from collections.abc import (
62
+ Generator,
63
+ Hashable,
64
+ Iterable,
65
+ MutableMapping,
66
+ Sequence,
67
+ )
68
+
69
+ from pandas import (
70
+ DataFrame,
71
+ Index,
72
+ Series,
73
+ )
74
+ from pandas.core.groupby import GroupBy
75
+ from pandas.core.resample import Resampler
76
+ from pandas.core.window.rolling import BaseWindow
77
+
78
+
79
+ ResType = dict[int, Any]
80
+
81
+
82
+ def frame_apply(
83
+ obj: DataFrame,
84
+ func: AggFuncType,
85
+ axis: Axis = 0,
86
+ raw: bool = False,
87
+ result_type: str | None = None,
88
+ by_row: Literal[False, "compat"] = "compat",
89
+ engine: str = "python",
90
+ engine_kwargs: dict[str, bool] | None = None,
91
+ args=None,
92
+ kwargs=None,
93
+ ) -> FrameApply:
94
+ """construct and return a row or column based frame apply object"""
95
+ axis = obj._get_axis_number(axis)
96
+ klass: type[FrameApply]
97
+ if axis == 0:
98
+ klass = FrameRowApply
99
+ elif axis == 1:
100
+ klass = FrameColumnApply
101
+
102
+ _, func, _, _ = reconstruct_func(func, **kwargs)
103
+ assert func is not None
104
+
105
+ return klass(
106
+ obj,
107
+ func,
108
+ raw=raw,
109
+ result_type=result_type,
110
+ by_row=by_row,
111
+ engine=engine,
112
+ engine_kwargs=engine_kwargs,
113
+ args=args,
114
+ kwargs=kwargs,
115
+ )
116
+
117
+
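frame_apply chooses FrameRowApply for ``axis=0`` and FrameColumnApply for ``axis=1``; from user code this machinery is reached through ``DataFrame.apply``. A minimal sketch (outputs shown approximately):

>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
>>> df.apply(np.sum)            # axis=0: one result per column
a    3
b    7
dtype: int64
>>> df.apply(np.sum, axis=1)    # axis=1: one result per row
0    4
1    6
dtype: int64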
118
+ class Apply(metaclass=abc.ABCMeta):
119
+ axis: AxisInt
120
+
121
+ def __init__(
122
+ self,
123
+ obj: AggObjType,
124
+ func: AggFuncType,
125
+ raw: bool,
126
+ result_type: str | None,
127
+ *,
128
+ by_row: Literal[False, "compat", "_compat"] = "compat",
129
+ engine: str = "python",
130
+ engine_kwargs: dict[str, bool] | None = None,
131
+ args,
132
+ kwargs,
133
+ ) -> None:
134
+ self.obj = obj
135
+ self.raw = raw
136
+
137
+ assert by_row is False or by_row in ["compat", "_compat"]
138
+ self.by_row = by_row
139
+
140
+ self.args = args or ()
141
+ self.kwargs = kwargs or {}
142
+
143
+ self.engine = engine
144
+ self.engine_kwargs = {} if engine_kwargs is None else engine_kwargs
145
+
146
+ if result_type not in [None, "reduce", "broadcast", "expand"]:
147
+ raise ValueError(
148
+ "invalid value for result_type, must be one "
149
+ "of {None, 'reduce', 'broadcast', 'expand'}"
150
+ )
151
+
152
+ self.result_type = result_type
153
+
154
+ self.func = func
155
+
156
+ @abc.abstractmethod
157
+ def apply(self) -> DataFrame | Series:
158
+ pass
159
+
160
+ @abc.abstractmethod
161
+ def agg_or_apply_list_like(
162
+ self, op_name: Literal["agg", "apply"]
163
+ ) -> DataFrame | Series:
164
+ pass
165
+
166
+ @abc.abstractmethod
167
+ def agg_or_apply_dict_like(
168
+ self, op_name: Literal["agg", "apply"]
169
+ ) -> DataFrame | Series:
170
+ pass
171
+
172
+ def agg(self) -> DataFrame | Series | None:
173
+ """
174
+ Provide an implementation for the aggregators.
175
+
176
+ Returns
177
+ -------
178
+ Result of aggregation, or None if agg cannot be performed by
179
+ this method.
180
+ """
181
+ obj = self.obj
182
+ func = self.func
183
+ args = self.args
184
+ kwargs = self.kwargs
185
+
186
+ if isinstance(func, str):
187
+ return self.apply_str()
188
+
189
+ if is_dict_like(func):
190
+ return self.agg_dict_like()
191
+ elif is_list_like(func):
192
+ # we require a list, but not a 'str'
193
+ return self.agg_list_like()
194
+
195
+ if callable(func):
196
+ f = com.get_cython_func(func)
197
+ if f and not args and not kwargs:
198
+ warn_alias_replacement(obj, func, f)
199
+ return getattr(obj, f)()
200
+
201
+ # caller can react
202
+ return None
203
+
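``agg`` above dispatches on the type of ``func`` (string, dict-like, list-like, plain callable); this is what ``DataFrame.agg`` and ``Series.agg`` ultimately call. A hedged sketch (outputs shown approximately):

>>> import pandas as pd
>>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
>>> df.agg("sum")                 # string -> apply_str
a    3
b    7
dtype: int64
>>> df.agg(["sum", "mean"])       # list-like -> agg_list_like
        a    b
sum   3.0  7.0
mean  1.5  3.5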
204
+ def transform(self) -> DataFrame | Series:
205
+ """
206
+ Transform a DataFrame or Series.
207
+
208
+ Returns
209
+ -------
210
+ DataFrame or Series
211
+ Result of applying ``func`` along the given axis of the
212
+ Series or DataFrame.
213
+
214
+ Raises
215
+ ------
216
+ ValueError
217
+ If the transform function fails or does not transform.
218
+ """
219
+ obj = self.obj
220
+ func = self.func
221
+ axis = self.axis
222
+ args = self.args
223
+ kwargs = self.kwargs
224
+
225
+ is_series = obj.ndim == 1
226
+
227
+ if obj._get_axis_number(axis) == 1:
228
+ assert not is_series
229
+ return obj.T.transform(func, 0, *args, **kwargs).T
230
+
231
+ if is_list_like(func) and not is_dict_like(func):
232
+ func = cast(list[AggFuncTypeBase], func)
233
+ # Convert func to an equivalent dict
234
+ if is_series:
235
+ func = {com.get_callable_name(v) or v: v for v in func}
236
+ else:
237
+ func = {col: func for col in obj}
238
+
239
+ if is_dict_like(func):
240
+ func = cast(AggFuncTypeDict, func)
241
+ return self.transform_dict_like(func)
242
+
243
+ # func is either str or callable
244
+ func = cast(AggFuncTypeBase, func)
245
+ try:
246
+ result = self.transform_str_or_callable(func)
247
+ except TypeError:
248
+ raise
249
+ except Exception as err:
250
+ raise ValueError("Transform function failed") from err
251
+
252
+ # Functions that transform may return empty Series/DataFrame
253
+ # when the dtype is not appropriate
254
+ if (
255
+ isinstance(result, (ABCSeries, ABCDataFrame))
256
+ and result.empty
257
+ and not obj.empty
258
+ ):
259
+ raise ValueError("Transform function failed")
260
+ # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type
261
+ # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,
262
+ # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame,
263
+ # Series]"
264
+ if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
265
+ obj.index # type: ignore[arg-type]
266
+ ):
267
+ raise ValueError("Function did not transform")
268
+
269
+ return result
270
+
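``transform`` requires the result to keep the original index, so reducing functions are rejected by the check above. A minimal sketch (outputs shown approximately):

>>> import pandas as pd
>>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
>>> df.transform(lambda x: x + 1)
   a  b
0  2  4
1  3  5
>>> df.transform("sum")
Traceback (most recent call last):
...
ValueError: Function did not transform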
271
+ def transform_dict_like(self, func) -> DataFrame:
272
+ """
273
+ Compute transform in the case of a dict-like func
274
+ """
275
+ from pandas.core.reshape.concat import concat
276
+
277
+ obj = self.obj
278
+ args = self.args
279
+ kwargs = self.kwargs
280
+
281
+ # transform is currently only for Series/DataFrame
282
+ assert isinstance(obj, ABCNDFrame)
283
+
284
+ if len(func) == 0:
285
+ raise ValueError("No transform functions were provided")
286
+
287
+ func = self.normalize_dictlike_arg("transform", obj, func)
288
+
289
+ results: dict[Hashable, DataFrame | Series] = {}
290
+ for name, how in func.items():
291
+ colg = obj._gotitem(name, ndim=1)
292
+ results[name] = colg.transform(how, 0, *args, **kwargs)
293
+ return concat(results, axis=1)
294
+
295
+ def transform_str_or_callable(self, func) -> DataFrame | Series:
296
+ """
297
+ Compute transform in the case of a string or callable func
298
+ """
299
+ obj = self.obj
300
+ args = self.args
301
+ kwargs = self.kwargs
302
+
303
+ if isinstance(func, str):
304
+ return self._apply_str(obj, func, *args, **kwargs)
305
+
306
+ if not args and not kwargs:
307
+ f = com.get_cython_func(func)
308
+ if f:
309
+ warn_alias_replacement(obj, func, f)
310
+ return getattr(obj, f)()
311
+
312
+ # Two possible ways to use a UDF - apply or call directly
313
+ try:
314
+ return obj.apply(func, args=args, **kwargs)
315
+ except Exception:
316
+ return func(obj, *args, **kwargs)
317
+
318
+ def agg_list_like(self) -> DataFrame | Series:
319
+ """
320
+ Compute aggregation in the case of a list-like argument.
321
+
322
+ Returns
323
+ -------
324
+ Result of aggregation.
325
+ """
326
+ return self.agg_or_apply_list_like(op_name="agg")
327
+
328
+ def compute_list_like(
329
+ self,
330
+ op_name: Literal["agg", "apply"],
331
+ selected_obj: Series | DataFrame,
332
+ kwargs: dict[str, Any],
333
+ ) -> tuple[list[Hashable] | Index, list[Any]]:
334
+ """
335
+ Compute agg/apply results for list-like input.
336
+
337
+ Parameters
338
+ ----------
339
+ op_name : {"agg", "apply"}
340
+ Operation being performed.
341
+ selected_obj : Series or DataFrame
342
+ Data to perform operation on.
343
+ kwargs : dict
344
+ Keyword arguments to pass to the functions.
345
+
346
+ Returns
347
+ -------
348
+ keys : list[Hashable] or Index
349
+ Index labels for result.
350
+ results : list
351
+ Data for result. When aggregating with a Series, this can contain any
352
+ Python objects.
353
+ """
354
+ func = cast(list[AggFuncTypeBase], self.func)
355
+ obj = self.obj
356
+
357
+ results = []
358
+ keys = []
359
+
360
+ # degenerate case
361
+ if selected_obj.ndim == 1:
362
+ for a in func:
363
+ colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
364
+ args = (
365
+ [self.axis, *self.args]
366
+ if include_axis(op_name, colg)
367
+ else self.args
368
+ )
369
+ new_res = getattr(colg, op_name)(a, *args, **kwargs)
370
+ results.append(new_res)
371
+
372
+ # make sure we find a good name
373
+ name = com.get_callable_name(a) or a
374
+ keys.append(name)
375
+
376
+ else:
377
+ indices = []
378
+ for index, col in enumerate(selected_obj):
379
+ colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
380
+ args = (
381
+ [self.axis, *self.args]
382
+ if include_axis(op_name, colg)
383
+ else self.args
384
+ )
385
+ new_res = getattr(colg, op_name)(func, *args, **kwargs)
386
+ results.append(new_res)
387
+ indices.append(index)
388
+ # error: Incompatible types in assignment (expression has type "Any |
389
+ # Index", variable has type "list[Any | Callable[..., Any] | str]")
390
+ keys = selected_obj.columns.take(indices) # type: ignore[assignment]
391
+
392
+ return keys, results
393
+
394
+ def wrap_results_list_like(
395
+ self, keys: Iterable[Hashable], results: list[Series | DataFrame]
396
+ ):
397
+ from pandas.core.reshape.concat import concat
398
+
399
+ obj = self.obj
400
+
401
+ try:
402
+ return concat(results, keys=keys, axis=1, sort=False)
403
+ except TypeError as err:
404
+ # we are concatting non-NDFrame objects,
405
+ # e.g. a list of scalars
406
+ from pandas import Series
407
+
408
+ result = Series(results, index=keys, name=obj.name)
409
+ if is_nested_object(result):
410
+ raise ValueError(
411
+ "cannot combine transform and aggregation operations"
412
+ ) from err
413
+ return result
414
+
415
+ def agg_dict_like(self) -> DataFrame | Series:
416
+ """
417
+ Compute aggregation in the case of a dict-like argument.
418
+
419
+ Returns
420
+ -------
421
+ Result of aggregation.
422
+ """
423
+ return self.agg_or_apply_dict_like(op_name="agg")
424
+
425
+ def compute_dict_like(
426
+ self,
427
+ op_name: Literal["agg", "apply"],
428
+ selected_obj: Series | DataFrame,
429
+ selection: Hashable | Sequence[Hashable],
430
+ kwargs: dict[str, Any],
431
+ ) -> tuple[list[Hashable], list[Any]]:
432
+ """
433
+ Compute agg/apply results for dict-like input.
434
+
435
+ Parameters
436
+ ----------
437
+ op_name : {"agg", "apply"}
438
+ Operation being performed.
439
+ selected_obj : Series or DataFrame
440
+ Data to perform operation on.
441
+ selection : hashable or sequence of hashables
442
+ Used by GroupBy, Window, and Resample if selection is applied to the object.
443
+ kwargs : dict
444
+ Keyword arguments to pass to the functions.
445
+
446
+ Returns
447
+ -------
448
+ keys : list[hashable]
449
+ Index labels for result.
450
+ results : list
451
+ Data for result. When aggregating with a Series, this can contain any
452
+ Python object.
453
+ """
454
+ from pandas.core.groupby.generic import (
455
+ DataFrameGroupBy,
456
+ SeriesGroupBy,
457
+ )
458
+
459
+ obj = self.obj
460
+ is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
461
+ func = cast(AggFuncTypeDict, self.func)
462
+ func = self.normalize_dictlike_arg(op_name, selected_obj, func)
463
+
464
+ is_non_unique_col = (
465
+ selected_obj.ndim == 2
466
+ and selected_obj.columns.nunique() < len(selected_obj.columns)
467
+ )
468
+
469
+ if selected_obj.ndim == 1:
470
+ # key only used for output
471
+ colg = obj._gotitem(selection, ndim=1)
472
+ results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()]
473
+ keys = list(func.keys())
474
+ elif not is_groupby and is_non_unique_col:
475
+ # key used for column selection and output
476
+ # GH#51099
477
+ results = []
478
+ keys = []
479
+ for key, how in func.items():
480
+ indices = selected_obj.columns.get_indexer_for([key])
481
+ labels = selected_obj.columns.take(indices)
482
+ label_to_indices = defaultdict(list)
483
+ for index, label in zip(indices, labels):
484
+ label_to_indices[label].append(index)
485
+
486
+ key_data = [
487
+ getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs)
488
+ for label, indices in label_to_indices.items()
489
+ for indice in indices
490
+ ]
491
+
492
+ keys += [key] * len(key_data)
493
+ results += key_data
494
+ else:
495
+ # key used for column selection and output
496
+ results = [
497
+ getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs)
498
+ for key, how in func.items()
499
+ ]
500
+ keys = list(func.keys())
501
+
502
+ return keys, results
503
+
504
+ def wrap_results_dict_like(
505
+ self,
506
+ selected_obj: Series | DataFrame,
507
+ result_index: list[Hashable],
508
+ result_data: list,
509
+ ):
510
+ from pandas import Index
511
+ from pandas.core.reshape.concat import concat
512
+
513
+ obj = self.obj
514
+
515
+ # Avoid making two isinstance calls in all and any below
516
+ is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data]
517
+
518
+ if all(is_ndframe):
519
+ results = dict(zip(result_index, result_data))
520
+ keys_to_use: Iterable[Hashable]
521
+ keys_to_use = [k for k in result_index if not results[k].empty]
522
+ # Have to check, if at least one DataFrame is not empty.
523
+ keys_to_use = keys_to_use if keys_to_use != [] else result_index
524
+ if selected_obj.ndim == 2:
525
+ # keys are columns, so we can preserve names
526
+ ktu = Index(keys_to_use)
527
+ ktu._set_names(selected_obj.columns.names)
528
+ keys_to_use = ktu
529
+
530
+ axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1
531
+ result = concat(
532
+ {k: results[k] for k in keys_to_use},
533
+ axis=axis,
534
+ keys=keys_to_use,
535
+ )
536
+ elif any(is_ndframe):
537
+ # There is a mix of NDFrames and scalars
538
+ raise ValueError(
539
+ "cannot perform both aggregation "
540
+ "and transformation operations "
541
+ "simultaneously"
542
+ )
543
+ else:
544
+ from pandas import Series
545
+
546
+ # we have a list of scalars
547
+ # GH 36212 use name only if obj is a series
548
+ if obj.ndim == 1:
549
+ obj = cast("Series", obj)
550
+ name = obj.name
551
+ else:
552
+ name = None
553
+
554
+ result = Series(result_data, index=result_index, name=name)
555
+
556
+ return result
557
+
558
+ def apply_str(self) -> DataFrame | Series:
559
+ """
560
+ Compute apply in case of a string.
561
+
562
+ Returns
563
+ -------
564
+ result: Series or DataFrame
565
+ """
566
+ # Caller is responsible for checking isinstance(self.func, str)
567
+ func = cast(str, self.func)
568
+
569
+ obj = self.obj
570
+
571
+ from pandas.core.groupby.generic import (
572
+ DataFrameGroupBy,
573
+ SeriesGroupBy,
574
+ )
575
+
576
+ # Support for `frame.transform('method')`
577
+ # Some methods (shift, etc.) require the axis argument, others
578
+ # don't, so inspect and insert if necessary.
579
+ method = getattr(obj, func, None)
580
+ if callable(method):
581
+ sig = inspect.getfullargspec(method)
582
+ arg_names = (*sig.args, *sig.kwonlyargs)
583
+ if self.axis != 0 and (
584
+ "axis" not in arg_names or func in ("corrwith", "skew")
585
+ ):
586
+ raise ValueError(f"Operation {func} does not support axis=1")
587
+ if "axis" in arg_names:
588
+ if isinstance(obj, (SeriesGroupBy, DataFrameGroupBy)):
589
+ # Try to avoid FutureWarning for deprecated axis keyword;
590
+ # If self.axis matches the axis we would get by not passing
591
+ # axis, we safely exclude the keyword.
592
+
593
+ default_axis = 0
594
+ if func in ["idxmax", "idxmin"]:
595
+ # DataFrameGroupBy.idxmax, idxmin axis defaults to self.axis,
596
+ # whereas other axis keywords default to 0
597
+ default_axis = self.obj.axis
598
+
599
+ if default_axis != self.axis:
600
+ self.kwargs["axis"] = self.axis
601
+ else:
602
+ self.kwargs["axis"] = self.axis
603
+ return self._apply_str(obj, func, *self.args, **self.kwargs)
604
+
605
+ def apply_list_or_dict_like(self) -> DataFrame | Series:
606
+ """
607
+ Compute apply in case of a list-like or dict-like.
608
+
609
+ Returns
610
+ -------
611
+ result: Series, DataFrame, or None
612
+ Result when self.func is a list-like or dict-like, None otherwise.
613
+ """
614
+
615
+ if self.engine == "numba":
616
+ raise NotImplementedError(
617
+ "The 'numba' engine doesn't support list-like/"
618
+ "dict likes of callables yet."
619
+ )
620
+
621
+ if self.axis == 1 and isinstance(self.obj, ABCDataFrame):
622
+ return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T
623
+
624
+ func = self.func
625
+ kwargs = self.kwargs
626
+
627
+ if is_dict_like(func):
628
+ result = self.agg_or_apply_dict_like(op_name="apply")
629
+ else:
630
+ result = self.agg_or_apply_list_like(op_name="apply")
631
+
632
+ result = reconstruct_and_relabel_result(result, func, **kwargs)
633
+
634
+ return result
635
+
636
+ def normalize_dictlike_arg(
637
+ self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict
638
+ ) -> AggFuncTypeDict:
639
+ """
640
+ Handler for dict-like argument.
641
+
642
+ Ensures that necessary columns exist if obj is a DataFrame, and
643
+ that a nested renamer is not passed. Also normalizes to all lists
644
+ when values consists of a mix of list and non-lists.
645
+ """
646
+ assert how in ("apply", "agg", "transform")
647
+
648
+ # Can't use func.values(); wouldn't work for a Series
649
+ if (
650
+ how == "agg"
651
+ and isinstance(obj, ABCSeries)
652
+ and any(is_list_like(v) for _, v in func.items())
653
+ ) or (any(is_dict_like(v) for _, v in func.items())):
654
+ # GH 15931 - deprecation of renaming keys
655
+ raise SpecificationError("nested renamer is not supported")
656
+
657
+ if obj.ndim != 1:
658
+ # Check for missing columns on a frame
659
+ from pandas import Index
660
+
661
+ cols = Index(list(func.keys())).difference(obj.columns, sort=True)
662
+ if len(cols) > 0:
663
+ raise KeyError(f"Column(s) {list(cols)} do not exist")
664
+
665
+ aggregator_types = (list, tuple, dict)
666
+
667
+ # if we have a dict of any non-scalars
668
+ # eg. {'A' : ['mean']}, normalize all to
669
+ # be list-likes
670
+ # Cannot use func.values() because arg may be a Series
671
+ if any(isinstance(x, aggregator_types) for _, x in func.items()):
672
+ new_func: AggFuncTypeDict = {}
673
+ for k, v in func.items():
674
+ if not isinstance(v, aggregator_types):
675
+ new_func[k] = [v]
676
+ else:
677
+ new_func[k] = v
678
+ func = new_func
679
+ return func
680
+
681
+ def _apply_str(self, obj, func: str, *args, **kwargs):
682
+ """
683
+ if arg is a string, then try to operate on it:
684
+ - try to find a function (or attribute) on obj
685
+ - try to find a numpy function
686
+ - raise
687
+ """
688
+ assert isinstance(func, str)
689
+
690
+ if hasattr(obj, func):
691
+ f = getattr(obj, func)
692
+ if callable(f):
693
+ return f(*args, **kwargs)
694
+
695
+ # people may aggregate on a non-callable attribute
696
+ # but don't let them think they can pass args to it
697
+ assert len(args) == 0
698
+ assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
699
+ return f
700
+ elif hasattr(np, func) and hasattr(obj, "__array__"):
701
+ # in particular exclude Window
702
+ f = getattr(np, func)
703
+ return f(obj, *args, **kwargs)
704
+ else:
705
+ msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object"
706
+ raise AttributeError(msg)
707
+
708
+
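``_apply_str`` above first looks for a method of that name on the object and only then falls back to a NumPy function; passing a string to ``Series.apply`` goes through this path. A hedged sketch (outputs shown approximately):

>>> import pandas as pd
>>> s = pd.Series([1, 2, 3])
>>> s.apply("cumsum")     # resolved as the Series.cumsum method
0    1
1    3
2    6
dtype: int64
>>> s.apply("sqrt")       # no Series.sqrt, so np.sqrt is used
0    1.000000
1    1.414214
2    1.732051
dtype: float64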
709
+ class NDFrameApply(Apply):
710
+ """
711
+ Methods shared by FrameApply and SeriesApply but
712
+ not GroupByApply or ResamplerWindowApply
713
+ """
714
+
715
+ obj: DataFrame | Series
716
+
717
+ @property
718
+ def index(self) -> Index:
719
+ return self.obj.index
720
+
721
+ @property
722
+ def agg_axis(self) -> Index:
723
+ return self.obj._get_agg_axis(self.axis)
724
+
725
+ def agg_or_apply_list_like(
726
+ self, op_name: Literal["agg", "apply"]
727
+ ) -> DataFrame | Series:
728
+ obj = self.obj
729
+ kwargs = self.kwargs
730
+
731
+ if op_name == "apply":
732
+ if isinstance(self, FrameApply):
733
+ by_row = self.by_row
734
+
735
+ elif isinstance(self, SeriesApply):
736
+ by_row = "_compat" if self.by_row else False
737
+ else:
738
+ by_row = False
739
+ kwargs = {**kwargs, "by_row": by_row}
740
+
741
+ if getattr(obj, "axis", 0) == 1:
742
+ raise NotImplementedError("axis other than 0 is not supported")
743
+
744
+ keys, results = self.compute_list_like(op_name, obj, kwargs)
745
+ result = self.wrap_results_list_like(keys, results)
746
+ return result
747
+
748
+ def agg_or_apply_dict_like(
749
+ self, op_name: Literal["agg", "apply"]
750
+ ) -> DataFrame | Series:
751
+ assert op_name in ["agg", "apply"]
752
+ obj = self.obj
753
+
754
+ kwargs = {}
755
+ if op_name == "apply":
756
+ by_row = "_compat" if self.by_row else False
757
+ kwargs.update({"by_row": by_row})
758
+
759
+ if getattr(obj, "axis", 0) == 1:
760
+ raise NotImplementedError("axis other than 0 is not supported")
761
+
762
+ selection = None
763
+ result_index, result_data = self.compute_dict_like(
764
+ op_name, obj, selection, kwargs
765
+ )
766
+ result = self.wrap_results_dict_like(obj, result_index, result_data)
767
+ return result
768
+
769
+
770
+ class FrameApply(NDFrameApply):
771
+ obj: DataFrame
772
+
773
+ def __init__(
774
+ self,
775
+ obj: AggObjType,
776
+ func: AggFuncType,
777
+ raw: bool,
778
+ result_type: str | None,
779
+ *,
780
+ by_row: Literal[False, "compat"] = False,
781
+ engine: str = "python",
782
+ engine_kwargs: dict[str, bool] | None = None,
783
+ args,
784
+ kwargs,
785
+ ) -> None:
786
+ if by_row is not False and by_row != "compat":
787
+ raise ValueError(f"by_row={by_row} not allowed")
788
+ super().__init__(
789
+ obj,
790
+ func,
791
+ raw,
792
+ result_type,
793
+ by_row=by_row,
794
+ engine=engine,
795
+ engine_kwargs=engine_kwargs,
796
+ args=args,
797
+ kwargs=kwargs,
798
+ )
799
+
800
+ # ---------------------------------------------------------------
801
+ # Abstract Methods
802
+
803
+ @property
804
+ @abc.abstractmethod
805
+ def result_index(self) -> Index:
806
+ pass
807
+
808
+ @property
809
+ @abc.abstractmethod
810
+ def result_columns(self) -> Index:
811
+ pass
812
+
813
+ @property
814
+ @abc.abstractmethod
815
+ def series_generator(self) -> Generator[Series, None, None]:
816
+ pass
817
+
818
+ @staticmethod
819
+ @functools.cache
820
+ @abc.abstractmethod
821
+ def generate_numba_apply_func(
822
+ func, nogil=True, nopython=True, parallel=False
823
+ ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
824
+ pass
825
+
826
+ @abc.abstractmethod
827
+ def apply_with_numba(self):
828
+ pass
829
+
830
+ def validate_values_for_numba(self):
831
+ # Validate that column dtypes are all OK
832
+ for colname, dtype in self.obj.dtypes.items():
833
+ if not is_numeric_dtype(dtype):
834
+ raise ValueError(
835
+ f"Column {colname} must have a numeric dtype. "
836
+ f"Found '{dtype}' instead"
837
+ )
838
+ if is_extension_array_dtype(dtype):
839
+ raise ValueError(
840
+ f"Column {colname} is backed by an extension array, "
841
+ f"which is not supported by the numba engine."
842
+ )
843
+
844
+ @abc.abstractmethod
845
+ def wrap_results_for_axis(
846
+ self, results: ResType, res_index: Index
847
+ ) -> DataFrame | Series:
848
+ pass
849
+
850
+ # ---------------------------------------------------------------
851
+
852
+ @property
853
+ def res_columns(self) -> Index:
854
+ return self.result_columns
855
+
856
+ @property
857
+ def columns(self) -> Index:
858
+ return self.obj.columns
859
+
860
+ @cache_readonly
861
+ def values(self):
862
+ return self.obj.values
863
+
864
+ def apply(self) -> DataFrame | Series:
865
+ """compute the results"""
866
+
867
+ # dispatch to handle list-like or dict-like
868
+ if is_list_like(self.func):
869
+ if self.engine == "numba":
870
+ raise NotImplementedError(
871
+ "the 'numba' engine doesn't support lists of callables yet"
872
+ )
873
+ return self.apply_list_or_dict_like()
874
+
875
+ # all empty
876
+ if len(self.columns) == 0 and len(self.index) == 0:
877
+ return self.apply_empty_result()
878
+
879
+ # string dispatch
880
+ if isinstance(self.func, str):
881
+ if self.engine == "numba":
882
+ raise NotImplementedError(
883
+ "the 'numba' engine doesn't support using "
884
+ "a string as the callable function"
885
+ )
886
+ return self.apply_str()
887
+
888
+ # ufunc
889
+ elif isinstance(self.func, np.ufunc):
890
+ if self.engine == "numba":
891
+ raise NotImplementedError(
892
+ "the 'numba' engine doesn't support "
893
+ "using a numpy ufunc as the callable function"
894
+ )
895
+ with np.errstate(all="ignore"):
896
+ results = self.obj._mgr.apply("apply", func=self.func)
897
+ # _constructor will retain self.index and self.columns
898
+ return self.obj._constructor_from_mgr(results, axes=results.axes)
899
+
900
+ # broadcasting
901
+ if self.result_type == "broadcast":
902
+ if self.engine == "numba":
903
+ raise NotImplementedError(
904
+ "the 'numba' engine doesn't support result_type='broadcast'"
905
+ )
906
+ return self.apply_broadcast(self.obj)
907
+
908
+ # one axis empty
909
+ elif not all(self.obj.shape):
910
+ return self.apply_empty_result()
911
+
912
+ # raw
913
+ elif self.raw:
914
+ return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs)
915
+
916
+ return self.apply_standard()
917
+
918
+ def agg(self):
919
+ obj = self.obj
920
+ axis = self.axis
921
+
922
+ # TODO: Avoid having to change state
923
+ self.obj = self.obj if self.axis == 0 else self.obj.T
924
+ self.axis = 0
925
+
926
+ result = None
927
+ try:
928
+ result = super().agg()
929
+ finally:
930
+ self.obj = obj
931
+ self.axis = axis
932
+
933
+ if axis == 1:
934
+ result = result.T if result is not None else result
935
+
936
+ if result is None:
937
+ result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs)
938
+
939
+ return result
940
+
941
+ def apply_empty_result(self):
942
+ """
943
+ we have an empty result; at least 1 axis is 0
944
+
945
+ we will try to apply the function to an empty
946
+ series in order to see if this is a reduction function
947
+ """
948
+ assert callable(self.func)
949
+
950
+ # we are not asked to reduce or infer reduction
951
+ # so just return a copy of the existing object
952
+ if self.result_type not in ["reduce", None]:
953
+ return self.obj.copy()
954
+
955
+ # we may need to infer
956
+ should_reduce = self.result_type == "reduce"
957
+
958
+ from pandas import Series
959
+
960
+ if not should_reduce:
961
+ try:
962
+ if self.axis == 0:
963
+ r = self.func(
964
+ Series([], dtype=np.float64), *self.args, **self.kwargs
965
+ )
966
+ else:
967
+ r = self.func(
968
+ Series(index=self.columns, dtype=np.float64),
969
+ *self.args,
970
+ **self.kwargs,
971
+ )
972
+ except Exception:
973
+ pass
974
+ else:
975
+ should_reduce = not isinstance(r, Series)
976
+
977
+ if should_reduce:
978
+ if len(self.agg_axis):
979
+ r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs)
980
+ else:
981
+ r = np.nan
982
+
983
+ return self.obj._constructor_sliced(r, index=self.agg_axis)
984
+ else:
985
+ return self.obj.copy()
986
+
987
+ def apply_raw(self, engine="python", engine_kwargs=None):
988
+ """apply to the values as a numpy array"""
989
+
990
+ def wrap_function(func):
991
+ """
992
+ Wrap user supplied function to work around numpy issue.
993
+
994
+ see https://github.com/numpy/numpy/issues/8352
995
+ """
996
+
997
+ def wrapper(*args, **kwargs):
998
+ result = func(*args, **kwargs)
999
+ if isinstance(result, str):
1000
+ result = np.array(result, dtype=object)
1001
+ return result
1002
+
1003
+ return wrapper
1004
+
1005
+ if engine == "numba":
1006
+ engine_kwargs = {} if engine_kwargs is None else engine_kwargs
1007
+
1008
+ # error: Argument 1 to "__call__" of "_lru_cache_wrapper" has
1009
+ # incompatible type "Callable[..., Any] | str | list[Callable
1010
+ # [..., Any] | str] | dict[Hashable,Callable[..., Any] | str |
1011
+ # list[Callable[..., Any] | str]]"; expected "Hashable"
1012
+ nb_looper = generate_apply_looper(
1013
+ self.func, **engine_kwargs # type: ignore[arg-type]
1014
+ )
1015
+ result = nb_looper(self.values, self.axis)
1016
+ # If we made the result 2-D, squeeze it back to 1-D
1017
+ result = np.squeeze(result)
1018
+ else:
1019
+ result = np.apply_along_axis(
1020
+ wrap_function(self.func),
1021
+ self.axis,
1022
+ self.values,
1023
+ *self.args,
1024
+ **self.kwargs,
1025
+ )
1026
+
1027
+ # TODO: mixed type case
1028
+ if result.ndim == 2:
1029
+ return self.obj._constructor(result, index=self.index, columns=self.columns)
1030
+ else:
1031
+ return self.obj._constructor_sliced(result, index=self.agg_axis)
1032
+
1033
+ def apply_broadcast(self, target: DataFrame) -> DataFrame:
1034
+ assert callable(self.func)
1035
+
1036
+ result_values = np.empty_like(target.values)
1037
+
1038
+ # axis length that each 1d result must match when broadcasting
1039
+ result_compare = target.shape[0]
1040
+
1041
+ for i, col in enumerate(target.columns):
1042
+ res = self.func(target[col], *self.args, **self.kwargs)
1043
+ ares = np.asarray(res).ndim
1044
+
1045
+ # must be a scalar or 1d
1046
+ if ares > 1:
1047
+ raise ValueError("too many dims to broadcast")
1048
+ if ares == 1:
1049
+ # must match return dim
1050
+ if result_compare != len(res):
1051
+ raise ValueError("cannot broadcast result")
1052
+
1053
+ result_values[:, i] = res
1054
+
1055
+ # we *always* preserve the original index / columns
1056
+ result = self.obj._constructor(
1057
+ result_values, index=target.index, columns=target.columns
1058
+ )
1059
+ return result
1060
+
1061
+ def apply_standard(self):
1062
+ if self.engine == "python":
1063
+ results, res_index = self.apply_series_generator()
1064
+ else:
1065
+ results, res_index = self.apply_series_numba()
1066
+
1067
+ # wrap results
1068
+ return self.wrap_results(results, res_index)
1069
+
1070
+ def apply_series_generator(self) -> tuple[ResType, Index]:
1071
+ assert callable(self.func)
1072
+
1073
+ series_gen = self.series_generator
1074
+ res_index = self.result_index
1075
+
1076
+ results = {}
1077
+
1078
+ with option_context("mode.chained_assignment", None):
1079
+ for i, v in enumerate(series_gen):
1080
+ # ignore SettingWithCopy here in case the user mutates
1081
+ results[i] = self.func(v, *self.args, **self.kwargs)
1082
+ if isinstance(results[i], ABCSeries):
1083
+ # If we have a view on v, we need to make a copy because
1084
+ # series_generator will swap out the underlying data
1085
+ results[i] = results[i].copy(deep=False)
1086
+
1087
+ return results, res_index
1088
+
1089
+ def apply_series_numba(self):
1090
+ if self.engine_kwargs.get("parallel", False):
1091
+ raise NotImplementedError(
1092
+ "Parallel apply is not supported when raw=False and engine='numba'"
1093
+ )
1094
+ if not self.obj.index.is_unique or not self.columns.is_unique:
1095
+ raise NotImplementedError(
1096
+ "The index/columns must be unique when raw=False and engine='numba'"
1097
+ )
1098
+ self.validate_values_for_numba()
1099
+ results = self.apply_with_numba()
1100
+ return results, self.result_index
1101
+
1102
+ def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series:
1103
+ from pandas import Series
1104
+
1105
+ # see if we can infer the results
1106
+ if len(results) > 0 and 0 in results and is_sequence(results[0]):
1107
+ return self.wrap_results_for_axis(results, res_index)
1108
+
1109
+ # dict of scalars
1110
+
1111
+ # the default dtype of an empty Series is `object`, but this
1112
+ # code can be hit by df.mean() where the result should have dtype
1113
+ # float64 even if it's an empty Series.
1114
+ constructor_sliced = self.obj._constructor_sliced
1115
+ if len(results) == 0 and constructor_sliced is Series:
1116
+ result = constructor_sliced(results, dtype=np.float64)
1117
+ else:
1118
+ result = constructor_sliced(results)
1119
+ result.index = res_index
1120
+
1121
+ return result
1122
+
1123
+ def apply_str(self) -> DataFrame | Series:
1124
+ # Caller is responsible for checking isinstance(self.func, str)
1125
+ # TODO: GH#39993 - Avoid special-casing by replacing with lambda
1126
+ if self.func == "size":
1127
+ # Special-cased because DataFrame.size returns a single scalar
1128
+ obj = self.obj
1129
+ value = obj.shape[self.axis]
1130
+ return obj._constructor_sliced(value, index=self.agg_axis)
1131
+ return super().apply_str()
1132
+
1133
+
1134
+ class FrameRowApply(FrameApply):
1135
+ axis: AxisInt = 0
1136
+
1137
+ @property
1138
+ def series_generator(self) -> Generator[Series, None, None]:
1139
+ return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
1140
+
1141
+ @staticmethod
1142
+ @functools.cache
1143
+ def generate_numba_apply_func(
1144
+ func, nogil=True, nopython=True, parallel=False
1145
+ ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
1146
+ numba = import_optional_dependency("numba")
1147
+ from pandas import Series
1148
+
1149
+ # Import helper from extensions to cast string object -> np strings
1150
+ # Note: This also has the side effect of loading our numba extensions
1151
+ from pandas.core._numba.extensions import maybe_cast_str
1152
+
1153
+ jitted_udf = numba.extending.register_jitable(func)
1154
+
1155
+ # Currently the parallel argument doesn't get passed through here
1156
+ # (it's disabled) since the dicts in numba aren't thread-safe.
1157
+ @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
1158
+ def numba_func(values, col_names, df_index):
1159
+ results = {}
1160
+ for j in range(values.shape[1]):
1161
+ # Create the series
1162
+ ser = Series(
1163
+ values[:, j], index=df_index, name=maybe_cast_str(col_names[j])
1164
+ )
1165
+ results[j] = jitted_udf(ser)
1166
+ return results
1167
+
1168
+ return numba_func
1169
+
1170
+ def apply_with_numba(self) -> dict[int, Any]:
1171
+ nb_func = self.generate_numba_apply_func(
1172
+ cast(Callable, self.func), **self.engine_kwargs
1173
+ )
1174
+ from pandas.core._numba.extensions import set_numba_data
1175
+
1176
+ index = self.obj.index
1177
+ if index.dtype == "string":
1178
+ index = index.astype(object)
1179
+
1180
+ columns = self.obj.columns
1181
+ if columns.dtype == "string":
1182
+ columns = columns.astype(object)
1183
+
1184
+ # Convert from numba dict to regular dict
1185
+ # Our isinstance checks in the df constructor don't pass for numba's typed dict
1186
+ with set_numba_data(index) as index, set_numba_data(columns) as columns:
1187
+ res = dict(nb_func(self.values, columns, index))
1188
+ return res
1189
+
1190
+ @property
1191
+ def result_index(self) -> Index:
1192
+ return self.columns
1193
+
1194
+ @property
1195
+ def result_columns(self) -> Index:
1196
+ return self.index
1197
+
1198
+ def wrap_results_for_axis(
1199
+ self, results: ResType, res_index: Index
1200
+ ) -> DataFrame | Series:
1201
+ """return the results for the rows"""
1202
+
1203
+ if self.result_type == "reduce":
1204
+ # e.g. test_apply_dict GH#8735
1205
+ res = self.obj._constructor_sliced(results)
1206
+ res.index = res_index
1207
+ return res
1208
+
1209
+ elif self.result_type is None and all(
1210
+ isinstance(x, dict) for x in results.values()
1211
+ ):
1212
+ # Our operation was a to_dict op e.g.
1213
+ # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
1214
+ res = self.obj._constructor_sliced(results)
1215
+ res.index = res_index
1216
+ return res
1217
+
1218
+ try:
1219
+ result = self.obj._constructor(data=results)
1220
+ except ValueError as err:
1221
+ if "All arrays must be of the same length" in str(err):
1222
+ # e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
1223
+ # see test_agg_listlike_result GH#29587
1224
+ res = self.obj._constructor_sliced(results)
1225
+ res.index = res_index
1226
+ return res
1227
+ else:
1228
+ raise
1229
+
1230
+ if not isinstance(results[0], ABCSeries):
1231
+ if len(result.index) == len(self.res_columns):
1232
+ result.index = self.res_columns
1233
+
1234
+ if len(result.columns) == len(res_index):
1235
+ result.columns = res_index
1236
+
1237
+ return result
1238
+
1239
+
1240
+ class FrameColumnApply(FrameApply):
1241
+ axis: AxisInt = 1
1242
+
1243
+ def apply_broadcast(self, target: DataFrame) -> DataFrame:
1244
+ result = super().apply_broadcast(target.T)
1245
+ return result.T
1246
+
1247
+ @property
1248
+ def series_generator(self) -> Generator[Series, None, None]:
1249
+ values = self.values
1250
+ values = ensure_wrapped_if_datetimelike(values)
1251
+ assert len(values) > 0
1252
+
1253
+ # We create one Series object, and will swap out the data inside
1254
+ # of it. Kids: don't do this at home.
1255
+ ser = self.obj._ixs(0, axis=0)
1256
+ mgr = ser._mgr
1257
+
1258
+ is_view = mgr.blocks[0].refs.has_reference() # type: ignore[union-attr]
1259
+
1260
+ if isinstance(ser.dtype, ExtensionDtype):
1261
+ # values will be incorrect for this block
1262
+ # TODO(EA2D): special case would be unnecessary with 2D EAs
1263
+ obj = self.obj
1264
+ for i in range(len(obj)):
1265
+ yield obj._ixs(i, axis=0)
1266
+
1267
+ else:
1268
+ for arr, name in zip(values, self.index):
1269
+ # GH#35462 re-pin mgr in case setitem changed it
1270
+ ser._mgr = mgr
1271
+ mgr.set_values(arr)
1272
+ object.__setattr__(ser, "_name", name)
1273
+ if not is_view:
1274
+ # In apply_series_generator we store a shallow copy of the
1275
+ # result, which potentially increases the ref count of this reused
1276
+ # `ser` object (depending on the result of the applied function)
1277
+ # -> if that happened and `ser` is already a copy, then we reset
1278
+ # the refs here to avoid triggering an unnecessary CoW inside the
1279
+ # applied function (https://github.com/pandas-dev/pandas/pull/56212)
1280
+ mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0]) # type: ignore[union-attr]
1281
+ yield ser
1282
+
1283
+ @staticmethod
1284
+ @functools.cache
1285
+ def generate_numba_apply_func(
1286
+ func, nogil=True, nopython=True, parallel=False
1287
+ ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
1288
+ numba = import_optional_dependency("numba")
1289
+ from pandas import Series
1290
+ from pandas.core._numba.extensions import maybe_cast_str
1291
+
1292
+ jitted_udf = numba.extending.register_jitable(func)
1293
+
1294
+ @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
1295
+ def numba_func(values, col_names_index, index):
1296
+ results = {}
1297
+ # Currently the parallel argument doesn't get passed through here
1298
+ # (it's disabled) since the dicts in numba aren't thread-safe.
1299
+ for i in range(values.shape[0]):
1300
+ # Create the series
1301
+ # TODO: values corrupted without the copy
1302
+ ser = Series(
1303
+ values[i].copy(),
1304
+ index=col_names_index,
1305
+ name=maybe_cast_str(index[i]),
1306
+ )
1307
+ results[i] = jitted_udf(ser)
1308
+
1309
+ return results
1310
+
1311
+ return numba_func
1312
+
1313
+ def apply_with_numba(self) -> dict[int, Any]:
1314
+ nb_func = self.generate_numba_apply_func(
1315
+ cast(Callable, self.func), **self.engine_kwargs
1316
+ )
1317
+
1318
+ from pandas.core._numba.extensions import set_numba_data
1319
+
1320
+ # Convert from numba dict to regular dict
1321
+ # Our isinstance checks in the df constructor don't pass for numba's typed dict
1322
+ with set_numba_data(self.obj.index) as index, set_numba_data(
1323
+ self.columns
1324
+ ) as columns:
1325
+ res = dict(nb_func(self.values, columns, index))
1326
+
1327
+ return res
1328
+
1329
+ @property
1330
+ def result_index(self) -> Index:
1331
+ return self.index
1332
+
1333
+ @property
1334
+ def result_columns(self) -> Index:
1335
+ return self.columns
1336
+
1337
+ def wrap_results_for_axis(
1338
+ self, results: ResType, res_index: Index
1339
+ ) -> DataFrame | Series:
1340
+ """return the results for the columns"""
1341
+ result: DataFrame | Series
1342
+
1343
+ # we have requested to expand
1344
+ if self.result_type == "expand":
1345
+ result = self.infer_to_same_shape(results, res_index)
1346
+
1347
+ # we have a non-series and don't want inference
1348
+ elif not isinstance(results[0], ABCSeries):
1349
+ result = self.obj._constructor_sliced(results)
1350
+ result.index = res_index
1351
+
1352
+ # we may want to infer results
1353
+ else:
1354
+ result = self.infer_to_same_shape(results, res_index)
1355
+
1356
+ return result
1357
+
1358
+ def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
1359
+ """infer the results to the same shape as the input object"""
1360
+ result = self.obj._constructor(data=results)
1361
+ result = result.T
1362
+
1363
+ # set the index
1364
+ result.index = res_index
1365
+
1366
+ # infer dtypes
1367
+ result = result.infer_objects(copy=False)
1368
+
1369
+ return result
1370
+
1371
+
1372
+ class SeriesApply(NDFrameApply):
1373
+ obj: Series
1374
+ axis: AxisInt = 0
1375
+ by_row: Literal[False, "compat", "_compat"] # only relevant for apply()
1376
+
1377
+ def __init__(
1378
+ self,
1379
+ obj: Series,
1380
+ func: AggFuncType,
1381
+ *,
1382
+ convert_dtype: bool | lib.NoDefault = lib.no_default,
1383
+ by_row: Literal[False, "compat", "_compat"] = "compat",
1384
+ args,
1385
+ kwargs,
1386
+ ) -> None:
1387
+ if convert_dtype is lib.no_default:
1388
+ convert_dtype = True
1389
+ else:
1390
+ warnings.warn(
1391
+ "the convert_dtype parameter is deprecated and will be removed in a "
1392
+ "future version. Do ``ser.astype(object).apply()`` "
1393
+ "instead if you want ``convert_dtype=False``.",
1394
+ FutureWarning,
1395
+ stacklevel=find_stack_level(),
1396
+ )
1397
+ self.convert_dtype = convert_dtype
1398
+
1399
+ super().__init__(
1400
+ obj,
1401
+ func,
1402
+ raw=False,
1403
+ result_type=None,
1404
+ by_row=by_row,
1405
+ args=args,
1406
+ kwargs=kwargs,
1407
+ )
1408
+
1409
+ def apply(self) -> DataFrame | Series:
1410
+ obj = self.obj
1411
+
1412
+ if len(obj) == 0:
1413
+ return self.apply_empty_result()
1414
+
1415
+ # dispatch to handle list-like or dict-like
1416
+ if is_list_like(self.func):
1417
+ return self.apply_list_or_dict_like()
1418
+
1419
+ if isinstance(self.func, str):
1420
+ # if we are a string, try to dispatch
1421
+ return self.apply_str()
1422
+
1423
+ if self.by_row == "_compat":
1424
+ return self.apply_compat()
1425
+
1426
+ # self.func is Callable
1427
+ return self.apply_standard()
1428
+
1429
+ def agg(self):
1430
+ result = super().agg()
1431
+ if result is None:
1432
+ obj = self.obj
1433
+ func = self.func
1434
+ # string, list-like, and dict-like are entirely handled in super
1435
+ assert callable(func)
1436
+
1437
+ # GH53325: The setup below is just to keep current behavior while emitting a
1438
+ # deprecation message. In the future this will all be replaced with a simple
1439
+ # `result = f(self.obj, *self.args, **self.kwargs)`.
1440
+ try:
1441
+ result = obj.apply(func, args=self.args, **self.kwargs)
1442
+ except (ValueError, AttributeError, TypeError):
1443
+ result = func(obj, *self.args, **self.kwargs)
1444
+ else:
1445
+ msg = (
1446
+ f"using {func} in {type(obj).__name__}.agg cannot aggregate and "
1447
+ f"has been deprecated. Use {type(obj).__name__}.transform to "
1448
+ f"keep behavior unchanged."
1449
+ )
1450
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
1451
+
1452
+ return result
1453
+
1454
+ def apply_empty_result(self) -> Series:
1455
+ obj = self.obj
1456
+ return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
1457
+ obj, method="apply"
1458
+ )
1459
+
1460
+ def apply_compat(self):
1461
+ """compat apply method for funcs in listlikes and dictlikes.
1462
+
1463
+ Used for each callable when giving listlikes and dictlikes of callables to
1464
+ apply. Needed for compatibility with Pandas < v2.1.
1465
+
1466
+ .. versionadded:: 2.1.0
1467
+ """
1468
+ obj = self.obj
1469
+ func = self.func
1470
+
1471
+ if callable(func):
1472
+ f = com.get_cython_func(func)
1473
+ if f and not self.args and not self.kwargs:
1474
+ return obj.apply(func, by_row=False)
1475
+
1476
+ try:
1477
+ result = obj.apply(func, by_row="compat")
1478
+ except (ValueError, AttributeError, TypeError):
1479
+ result = obj.apply(func, by_row=False)
1480
+ return result
1481
+
1482
+ def apply_standard(self) -> DataFrame | Series:
1483
+ # caller is responsible for ensuring that f is Callable
1484
+ func = cast(Callable, self.func)
1485
+ obj = self.obj
1486
+
1487
+ if isinstance(func, np.ufunc):
1488
+ with np.errstate(all="ignore"):
1489
+ return func(obj, *self.args, **self.kwargs)
1490
+ elif not self.by_row:
1491
+ return func(obj, *self.args, **self.kwargs)
1492
+
1493
+ if self.args or self.kwargs:
1494
+ # _map_values does not support args/kwargs
1495
+ def curried(x):
1496
+ return func(x, *self.args, **self.kwargs)
1497
+
1498
+ else:
1499
+ curried = func
1500
+
1501
+ # row-wise access
1502
+ # apply doesn't have a `na_action` keyword and for backward compat reasons
1503
+ # we need to give `na_action="ignore"` for categorical data.
1504
+ # TODO: remove the `na_action="ignore"` when that default has been changed in
1505
+ # Categorical (GH51645).
1506
+ action = "ignore" if isinstance(obj.dtype, CategoricalDtype) else None
1507
+ mapped = obj._map_values(
1508
+ mapper=curried, na_action=action, convert=self.convert_dtype
1509
+ )
1510
+
1511
+ if len(mapped) and isinstance(mapped[0], ABCSeries):
1512
+ # GH#43986 Need to do list(mapped) in order to get treated as nested
1513
+ # See also GH#25959 regarding EA support
1514
+ return obj._constructor_expanddim(list(mapped), index=obj.index)
1515
+ else:
1516
+ return obj._constructor(mapped, index=obj.index).__finalize__(
1517
+ obj, method="apply"
1518
+ )
1519
+
1520
+
1521
+ class GroupByApply(Apply):
1522
+ obj: GroupBy | Resampler | BaseWindow
1523
+
1524
+ def __init__(
1525
+ self,
1526
+ obj: GroupBy[NDFrameT],
1527
+ func: AggFuncType,
1528
+ *,
1529
+ args,
1530
+ kwargs,
1531
+ ) -> None:
1532
+ kwargs = kwargs.copy()
1533
+ self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0))
1534
+ super().__init__(
1535
+ obj,
1536
+ func,
1537
+ raw=False,
1538
+ result_type=None,
1539
+ args=args,
1540
+ kwargs=kwargs,
1541
+ )
1542
+
1543
+ def apply(self):
1544
+ raise NotImplementedError
1545
+
1546
+ def transform(self):
1547
+ raise NotImplementedError
1548
+
1549
+ def agg_or_apply_list_like(
1550
+ self, op_name: Literal["agg", "apply"]
1551
+ ) -> DataFrame | Series:
1552
+ obj = self.obj
1553
+ kwargs = self.kwargs
1554
+ if op_name == "apply":
1555
+ kwargs = {**kwargs, "by_row": False}
1556
+
1557
+ if getattr(obj, "axis", 0) == 1:
1558
+ raise NotImplementedError("axis other than 0 is not supported")
1559
+
1560
+ if obj._selected_obj.ndim == 1:
1561
+ # For SeriesGroupBy this matches _obj_with_exclusions
1562
+ selected_obj = obj._selected_obj
1563
+ else:
1564
+ selected_obj = obj._obj_with_exclusions
1565
+
1566
+ # Only set as_index=True on groupby objects, not Window or Resample
1567
+ # that inherit from this class.
1568
+ with com.temp_setattr(
1569
+ obj, "as_index", True, condition=hasattr(obj, "as_index")
1570
+ ):
1571
+ keys, results = self.compute_list_like(op_name, selected_obj, kwargs)
1572
+ result = self.wrap_results_list_like(keys, results)
1573
+ return result
1574
+
1575
+ def agg_or_apply_dict_like(
1576
+ self, op_name: Literal["agg", "apply"]
1577
+ ) -> DataFrame | Series:
1578
+ from pandas.core.groupby.generic import (
1579
+ DataFrameGroupBy,
1580
+ SeriesGroupBy,
1581
+ )
1582
+
1583
+ assert op_name in ["agg", "apply"]
1584
+
1585
+ obj = self.obj
1586
+ kwargs = {}
1587
+ if op_name == "apply":
1588
+ by_row = "_compat" if self.by_row else False
1589
+ kwargs.update({"by_row": by_row})
1590
+
1591
+ if getattr(obj, "axis", 0) == 1:
1592
+ raise NotImplementedError("axis other than 0 is not supported")
1593
+
1594
+ selected_obj = obj._selected_obj
1595
+ selection = obj._selection
1596
+
1597
+ is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
1598
+
1599
+ # Numba Groupby engine/engine-kwargs passthrough
1600
+ if is_groupby:
1601
+ engine = self.kwargs.get("engine", None)
1602
+ engine_kwargs = self.kwargs.get("engine_kwargs", None)
1603
+ kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs})
1604
+
1605
+ with com.temp_setattr(
1606
+ obj, "as_index", True, condition=hasattr(obj, "as_index")
1607
+ ):
1608
+ result_index, result_data = self.compute_dict_like(
1609
+ op_name, selected_obj, selection, kwargs
1610
+ )
1611
+ result = self.wrap_results_dict_like(selected_obj, result_index, result_data)
1612
+ return result
1613
+
1614
+
1615
+ class ResamplerWindowApply(GroupByApply):
1616
+ axis: AxisInt = 0
1617
+ obj: Resampler | BaseWindow
1618
+
1619
+ def __init__(
1620
+ self,
1621
+ obj: Resampler | BaseWindow,
1622
+ func: AggFuncType,
1623
+ *,
1624
+ args,
1625
+ kwargs,
1626
+ ) -> None:
1627
+ super(GroupByApply, self).__init__(
1628
+ obj,
1629
+ func,
1630
+ raw=False,
1631
+ result_type=None,
1632
+ args=args,
1633
+ kwargs=kwargs,
1634
+ )
1635
+
1636
+ def apply(self):
1637
+ raise NotImplementedError
1638
+
1639
+ def transform(self):
1640
+ raise NotImplementedError
1641
+
1642
+
1643
+ def reconstruct_func(
1644
+ func: AggFuncType | None, **kwargs
1645
+ ) -> tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]:
1646
+ """
1647
+ This is the internal function to reconstruct func given if there is relabeling
1648
+ or not and also normalize the keyword to get new order of columns.
1649
+
1650
+ If named aggregation is applied, `func` will be None, and kwargs contains the
1651
+ column and aggregation function information to be parsed;
1652
+ If named aggregation is not applied, `func` is either string (e.g. 'min') or
1653
+ Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
1654
+ and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
1655
+
1656
+ If relabeling is True, will return relabeling, reconstructed func, column
1657
+ names, and the reconstructed order of columns.
1658
+ If relabeling is False, the columns and order will be None.
1659
+
1660
+ Parameters
1661
+ ----------
1662
+ func: agg function (e.g. 'min' or Callable) or list of agg functions
1663
+ (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
1664
+ **kwargs: dict, kwargs used in is_multi_agg_with_relabel and
1665
+ normalize_keyword_aggregation function for relabelling
1666
+
1667
+ Returns
1668
+ -------
1669
+ relabelling: bool, if there is relabelling or not
1670
+ func: normalized and mangled func
1671
+ columns: tuple of column names
1672
+ order: array of columns indices
1673
+
1674
+ Examples
1675
+ --------
1676
+ >>> reconstruct_func(None, **{"foo": ("col", "min")})
1677
+ (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
1678
+
1679
+ >>> reconstruct_func("min")
1680
+ (False, 'min', None, None)
1681
+ """
1682
+ relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
1683
+ columns: tuple[str, ...] | None = None
1684
+ order: npt.NDArray[np.intp] | None = None
1685
+
1686
+ if not relabeling:
1687
+ if isinstance(func, list) and len(func) > len(set(func)):
1688
+ # GH 28426 will raise error if duplicated function names are used and
1689
+ # there is no reassigned name
1690
+ raise SpecificationError(
1691
+ "Function names must be unique if there is no new column names "
1692
+ "assigned"
1693
+ )
1694
+ if func is None:
1695
+ # nicer error message
1696
+ raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
1697
+
1698
+ if relabeling:
1699
+ # error: Incompatible types in assignment (expression has type
1700
+ # "MutableMapping[Hashable, list[Callable[..., Any] | str]]", variable has type
1701
+ # "Callable[..., Any] | str | list[Callable[..., Any] | str] |
1702
+ # MutableMapping[Hashable, Callable[..., Any] | str | list[Callable[..., Any] |
1703
+ # str]] | None")
1704
+ func, columns, order = normalize_keyword_aggregation( # type: ignore[assignment]
1705
+ kwargs
1706
+ )
1707
+ assert func is not None
1708
+
1709
+ return relabeling, func, columns, order
1710
+
1711
+
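A usage sketch of the relabeling path above (a minimal example assuming only the public pandas API; the frame and names are illustrative): the named-aggregation kwargs that ``reconstruct_func`` normalizes are exactly what ``DataFrame.groupby(...).agg`` receives.

import pandas as pd

# Hypothetical data; foo=("col", "min") is the kind of kwarg that
# reconstruct_func / normalize_keyword_aggregation turn into a
# column -> [funcs] spec plus the output column names and their order.
df = pd.DataFrame({"g": ["a", "a", "b"], "col": [1, 2, 3]})
out = df.groupby("g").agg(foo=("col", "min"), bar=("col", "max"))
print(out)
#    foo  bar
# g
# a    1    2
# b    3    3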
1712
+ def is_multi_agg_with_relabel(**kwargs) -> bool:
1713
+ """
1714
+ Check whether kwargs passed to .agg look like multi-agg with relabeling.
1715
+
1716
+ Parameters
1717
+ ----------
1718
+ **kwargs : dict
1719
+
1720
+ Returns
1721
+ -------
1722
+ bool
1723
+
1724
+ Examples
1725
+ --------
1726
+ >>> is_multi_agg_with_relabel(a="max")
1727
+ False
1728
+ >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
1729
+ True
1730
+ >>> is_multi_agg_with_relabel()
1731
+ False
1732
+ """
1733
+ return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
1734
+ len(kwargs) > 0
1735
+ )
1736
+
1737
+
1738
+ def normalize_keyword_aggregation(
1739
+ kwargs: dict,
1740
+ ) -> tuple[
1741
+ MutableMapping[Hashable, list[AggFuncTypeBase]],
1742
+ tuple[str, ...],
1743
+ npt.NDArray[np.intp],
1744
+ ]:
1745
+ """
1746
+ Normalize user-provided "named aggregation" kwargs.
1747
+ Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
1748
+ to the old Dict[str, List[scalar]]].
1749
+
1750
+ Parameters
1751
+ ----------
1752
+ kwargs : dict
1753
+
1754
+ Returns
1755
+ -------
1756
+ aggspec : dict
1757
+ The transformed kwargs.
1758
+ columns : tuple[str, ...]
1759
+ The user-provided keys.
1760
+ col_idx_order : List[int]
1761
+ List of columns indices.
1762
+
1763
+ Examples
1764
+ --------
1765
+ >>> normalize_keyword_aggregation({"output": ("input", "sum")})
1766
+ (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
1767
+ """
1768
+ from pandas.core.indexes.base import Index
1769
+
1770
+ # Normalize the aggregation functions as Mapping[column, List[func]],
1771
+ # process normally, then fixup the names.
1772
+ # TODO: aggspec type: typing.Dict[str, List[AggScalar]]
1773
+ aggspec = defaultdict(list)
1774
+ order = []
1775
+ columns, pairs = list(zip(*kwargs.items()))
1776
+
1777
+ for column, aggfunc in pairs:
1778
+ aggspec[column].append(aggfunc)
1779
+ order.append((column, com.get_callable_name(aggfunc) or aggfunc))
1780
+
1781
+ # uniquify aggfunc name if duplicated in order list
1782
+ uniquified_order = _make_unique_kwarg_list(order)
1783
+
1784
+ # GH 25719, due to aggspec will change the order of assigned columns in aggregation
1785
+ # uniquified_aggspec will store uniquified order list and will compare it with order
1786
+ # based on index
1787
+ aggspec_order = [
1788
+ (column, com.get_callable_name(aggfunc) or aggfunc)
1789
+ for column, aggfuncs in aggspec.items()
1790
+ for aggfunc in aggfuncs
1791
+ ]
1792
+ uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
1793
+
1794
+ # get the new index of columns by comparison
1795
+ col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
1796
+ return aggspec, columns, col_idx_order
1797
+
1798
+
1799
+ def _make_unique_kwarg_list(
1800
+ seq: Sequence[tuple[Any, Any]]
1801
+ ) -> Sequence[tuple[Any, Any]]:
1802
+ """
1803
+ Uniquify aggfunc name of the pairs in the order list
1804
+
1805
+ Examples:
1806
+ --------
1807
+ >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
1808
+ >>> _make_unique_kwarg_list(kwarg_list)
1809
+ [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
1810
+ """
1811
+ return [
1812
+ (pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair
1813
+ for i, pair in enumerate(seq)
1814
+ ]
1815
+
1816
+
1817
+ def relabel_result(
1818
+ result: DataFrame | Series,
1819
+ func: dict[str, list[Callable | str]],
1820
+ columns: Iterable[Hashable],
1821
+ order: Iterable[int],
1822
+ ) -> dict[Hashable, Series]:
1823
+ """
1824
+ Internal function to reorder result if relabelling is True for
1825
+ dataframe.agg, and return the reordered result in dict.
1826
+
1827
+ Parameters:
1828
+ ----------
1829
+ result: Result from aggregation
1830
+ func: Dict of (column name, funcs)
1831
+ columns: New columns name for relabelling
1832
+ order: New order for relabelling
1833
+
1834
+ Examples
1835
+ --------
1836
+ >>> from pandas.core.apply import relabel_result
1837
+ >>> result = pd.DataFrame(
1838
+ ... {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
1839
+ ... index=["max", "mean", "min"]
1840
+ ... )
1841
+ >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
1842
+ >>> columns = ("foo", "aab", "bar", "dat")
1843
+ >>> order = [0, 1, 2, 3]
1844
+ >>> result_in_dict = relabel_result(result, funcs, columns, order)
1845
+ >>> pd.DataFrame(result_in_dict, index=columns)
1846
+ A C B
1847
+ foo 2.0 NaN NaN
1848
+ aab NaN 6.0 NaN
1849
+ bar NaN NaN 4.0
1850
+ dat NaN NaN 2.5
1851
+ """
1852
+ from pandas.core.indexes.base import Index
1853
+
1854
+ reordered_indexes = [
1855
+ pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
1856
+ ]
1857
+ reordered_result_in_dict: dict[Hashable, Series] = {}
1858
+ idx = 0
1859
+
1860
+ reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
1861
+ for col, fun in func.items():
1862
+ s = result[col].dropna()
1863
+
1864
+ # In the `_aggregate`, the callable names are obtained and used in `result`, and
1865
+ # these names are ordered alphabetically. e.g.
1866
+ # C2 C1
1867
+ # <lambda> 1 NaN
1868
+ # amax NaN 4.0
1869
+ # max NaN 4.0
1870
+ # sum 18.0 6.0
1871
+ # Therefore, the order of functions for each column could be shuffled
1872
+ # accordingly, so we need to get the callable name if it is not already parsed, and
1873
+ # reorder the aggregated result for each column.
1874
+ # e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
1875
+ # [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
1876
+ # reorder so that aggregated values map to their functions regarding the order.
1877
+
1878
+ # However, when only one column is being used for aggregation, there is no need
1879
+ # to reorder since the index is not sorted; keep it as is in `funcs`, e.g.
1880
+ # A
1881
+ # min 1.0
1882
+ # mean 1.5
1883
+ # mean 1.5
1884
+ if reorder_mask:
1885
+ fun = [
1886
+ com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
1887
+ ]
1888
+ col_idx_order = Index(s.index).get_indexer(fun)
1889
+ s = s.iloc[col_idx_order]
1890
+
1891
+ # assign the new user-provided "named aggregation" as index names, and reindex
1892
+ # it based on the whole user-provided names.
1893
+ s.index = reordered_indexes[idx : idx + len(fun)]
1894
+ reordered_result_in_dict[col] = s.reindex(columns, copy=False)
1895
+ idx = idx + len(fun)
1896
+ return reordered_result_in_dict
1897
+
1898
+
1899
+ def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series:
1900
+ from pandas import DataFrame
1901
+
1902
+ relabeling, func, columns, order = reconstruct_func(func, **kwargs)
1903
+
1904
+ if relabeling:
1905
+ # This is to keep the order to columns occurrence unchanged, and also
1906
+ # keep the order of new columns occurrence unchanged
1907
+
1908
+ # For the return values of reconstruct_func, if relabeling is
1909
+ # False, columns and order will be None.
1910
+ assert columns is not None
1911
+ assert order is not None
1912
+
1913
+ result_in_dict = relabel_result(result, func, columns, order)
1914
+ result = DataFrame(result_in_dict, index=columns)
1915
+
1916
+ return result
1917
+
1918
+
1919
+ # TODO: Can't use, because mypy doesn't like us setting __name__
1920
+ # error: "partial[Any]" has no attribute "__name__"
1921
+ # the type is:
1922
+ # typing.Sequence[Callable[..., ScalarResult]]
1923
+ # -> typing.Sequence[Callable[..., ScalarResult]]:
1924
+
1925
+
1926
+ def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
1927
+ """
1928
+ Possibly mangle a list of aggfuncs.
1929
+
1930
+ Parameters
1931
+ ----------
1932
+ aggfuncs : Sequence
1933
+
1934
+ Returns
1935
+ -------
1936
+ mangled: list-like
1937
+ A new AggSpec sequence, where lambdas have been converted
1938
+ to have unique names.
1939
+
1940
+ Notes
1941
+ -----
1942
+ If just one aggfunc is passed, the name will not be mangled.
1943
+ """
1944
+ if len(aggfuncs) <= 1:
1945
+ # don't mangle for .agg([lambda x: .])
1946
+ return aggfuncs
1947
+ i = 0
1948
+ mangled_aggfuncs = []
1949
+ for aggfunc in aggfuncs:
1950
+ if com.get_callable_name(aggfunc) == "<lambda>":
1951
+ aggfunc = partial(aggfunc)
1952
+ aggfunc.__name__ = f"<lambda_{i}>"
1953
+ i += 1
1954
+ mangled_aggfuncs.append(aggfunc)
1955
+
1956
+ return mangled_aggfuncs
1957
+
1958
+
1959
+ def maybe_mangle_lambdas(agg_spec: Any) -> Any:
1960
+ """
1961
+ Make new lambdas with unique names.
1962
+
1963
+ Parameters
1964
+ ----------
1965
+ agg_spec : Any
1966
+ An argument to GroupBy.agg.
1967
+ Non-dict-like `agg_spec` are pass through as is.
1968
+ For dict-like `agg_spec` a new spec is returned
1969
+ with name-mangled lambdas.
1970
+
1971
+ Returns
1972
+ -------
1973
+ mangled : Any
1974
+ Same type as the input.
1975
+
1976
+ Examples
1977
+ --------
1978
+ >>> maybe_mangle_lambdas('sum')
1979
+ 'sum'
1980
+ >>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
1981
+ [<function __main__.<lambda_0>,
1982
+ <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
1983
+ """
1984
+ is_dict = is_dict_like(agg_spec)
1985
+ if not (is_dict or is_list_like(agg_spec)):
1986
+ return agg_spec
1987
+ mangled_aggspec = type(agg_spec)() # dict or OrderedDict
1988
+
1989
+ if is_dict:
1990
+ for key, aggfuncs in agg_spec.items():
1991
+ if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
1992
+ mangled_aggfuncs = _managle_lambda_list(aggfuncs)
1993
+ else:
1994
+ mangled_aggfuncs = aggfuncs
1995
+
1996
+ mangled_aggspec[key] = mangled_aggfuncs
1997
+ else:
1998
+ mangled_aggspec = _managle_lambda_list(agg_spec)
1999
+
2000
+ return mangled_aggspec
2001
+
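A small sketch of how the mangling above surfaces (assuming the public groupby API; data is illustrative): two anonymous functions would otherwise both be named ``<lambda>``, so they come back as ``<lambda_0>`` and ``<lambda_1>``.

import pandas as pd

df = pd.DataFrame({"g": ["a", "a", "b"], "col": [1, 2, 3]})
# Both aggfuncs are lambdas, so maybe_mangle_lambdas assigns unique names.
res = df.groupby("g")["col"].agg([lambda x: x.min(), lambda x: x.max()])
print(res.columns.tolist())
# ['<lambda_0>', '<lambda_1>']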
2002
+
2003
+ def validate_func_kwargs(
2004
+ kwargs: dict,
2005
+ ) -> tuple[list[str], list[str | Callable[..., Any]]]:
2006
+ """
2007
+ Validates types of user-provided "named aggregation" kwargs.
2008
+ `TypeError` is raised if aggfunc is not `str` or callable.
2009
+
2010
+ Parameters
2011
+ ----------
2012
+ kwargs : dict
2013
+
2014
+ Returns
2015
+ -------
2016
+ columns : List[str]
2017
+ List of user-provided keys.
2018
+ func : List[Union[str, callable[...,Any]]]
2019
+ List of user-provided aggfuncs
2020
+
2021
+ Examples
2022
+ --------
2023
+ >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
2024
+ (['one', 'two'], ['min', 'max'])
2025
+ """
2026
+ tuple_given_message = "func is expected but received {} in **kwargs."
2027
+ columns = list(kwargs)
2028
+ func = []
2029
+ for col_func in kwargs.values():
2030
+ if not (isinstance(col_func, str) or callable(col_func)):
2031
+ raise TypeError(tuple_given_message.format(type(col_func).__name__))
2032
+ func.append(col_func)
2033
+ if not columns:
2034
+ no_arg_message = "Must provide 'func' or named aggregation **kwargs."
2035
+ raise TypeError(no_arg_message)
2036
+ return columns, func
2037
+
2038
+
2039
+ def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool:
2040
+ return isinstance(colg, ABCDataFrame) or (
2041
+ isinstance(colg, ABCSeries) and op_name == "agg"
2042
+ )
2043
+
2044
+
2045
+ def warn_alias_replacement(
2046
+ obj: AggObjType,
2047
+ func: Callable,
2048
+ alias: str,
2049
+ ) -> None:
2050
+ if alias.startswith("np."):
2051
+ full_alias = alias
2052
+ else:
2053
+ full_alias = f"{type(obj).__name__}.{alias}"
2054
+ alias = f'"{alias}"'
2055
+ warnings.warn(
2056
+ f"The provided callable {func} is currently using "
2057
+ f"{full_alias}. In a future version of pandas, "
2058
+ f"the provided callable will be used directly. To keep current "
2059
+ f"behavior pass the string {alias} instead.",
2060
+ category=FutureWarning,
2061
+ stacklevel=find_stack_level(),
2062
+ )
venv/lib/python3.10/site-packages/pandas/core/arraylike.py ADDED
@@ -0,0 +1,530 @@
1
+ """
2
+ Methods that can be shared by many array-like classes or subclasses:
3
+ Series
4
+ Index
5
+ ExtensionArray
6
+ """
7
+ from __future__ import annotations
8
+
9
+ import operator
10
+ from typing import Any
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
16
+
17
+ from pandas.core.dtypes.generic import ABCNDFrame
18
+
19
+ from pandas.core import roperator
20
+ from pandas.core.construction import extract_array
21
+ from pandas.core.ops.common import unpack_zerodim_and_defer
22
+
23
+ REDUCTION_ALIASES = {
24
+ "maximum": "max",
25
+ "minimum": "min",
26
+ "add": "sum",
27
+ "multiply": "prod",
28
+ }
29
+
30
+
31
+ class OpsMixin:
32
+ # -------------------------------------------------------------
33
+ # Comparisons
34
+
35
+ def _cmp_method(self, other, op):
36
+ return NotImplemented
37
+
38
+ @unpack_zerodim_and_defer("__eq__")
39
+ def __eq__(self, other):
40
+ return self._cmp_method(other, operator.eq)
41
+
42
+ @unpack_zerodim_and_defer("__ne__")
43
+ def __ne__(self, other):
44
+ return self._cmp_method(other, operator.ne)
45
+
46
+ @unpack_zerodim_and_defer("__lt__")
47
+ def __lt__(self, other):
48
+ return self._cmp_method(other, operator.lt)
49
+
50
+ @unpack_zerodim_and_defer("__le__")
51
+ def __le__(self, other):
52
+ return self._cmp_method(other, operator.le)
53
+
54
+ @unpack_zerodim_and_defer("__gt__")
55
+ def __gt__(self, other):
56
+ return self._cmp_method(other, operator.gt)
57
+
58
+ @unpack_zerodim_and_defer("__ge__")
59
+ def __ge__(self, other):
60
+ return self._cmp_method(other, operator.ge)
61
+
62
+ # -------------------------------------------------------------
63
+ # Logical Methods
64
+
65
+ def _logical_method(self, other, op):
66
+ return NotImplemented
67
+
68
+ @unpack_zerodim_and_defer("__and__")
69
+ def __and__(self, other):
70
+ return self._logical_method(other, operator.and_)
71
+
72
+ @unpack_zerodim_and_defer("__rand__")
73
+ def __rand__(self, other):
74
+ return self._logical_method(other, roperator.rand_)
75
+
76
+ @unpack_zerodim_and_defer("__or__")
77
+ def __or__(self, other):
78
+ return self._logical_method(other, operator.or_)
79
+
80
+ @unpack_zerodim_and_defer("__ror__")
81
+ def __ror__(self, other):
82
+ return self._logical_method(other, roperator.ror_)
83
+
84
+ @unpack_zerodim_and_defer("__xor__")
85
+ def __xor__(self, other):
86
+ return self._logical_method(other, operator.xor)
87
+
88
+ @unpack_zerodim_and_defer("__rxor__")
89
+ def __rxor__(self, other):
90
+ return self._logical_method(other, roperator.rxor)
91
+
92
+ # -------------------------------------------------------------
93
+ # Arithmetic Methods
94
+
95
+ def _arith_method(self, other, op):
96
+ return NotImplemented
97
+
98
+ @unpack_zerodim_and_defer("__add__")
99
+ def __add__(self, other):
100
+ """
101
+ Get Addition of DataFrame and other, column-wise.
102
+
103
+ Equivalent to ``DataFrame.add(other)``.
104
+
105
+ Parameters
106
+ ----------
107
+ other : scalar, sequence, Series, dict or DataFrame
108
+ Object to be added to the DataFrame.
109
+
110
+ Returns
111
+ -------
112
+ DataFrame
113
+ The result of adding ``other`` to DataFrame.
114
+
115
+ See Also
116
+ --------
117
+ DataFrame.add : Add a DataFrame and another object, with option for index-
118
+ or column-oriented addition.
119
+
120
+ Examples
121
+ --------
122
+ >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
123
+ ... index=['elk', 'moose'])
124
+ >>> df
125
+ height weight
126
+ elk 1.5 500
127
+ moose 2.6 800
128
+
129
+ Adding a scalar affects all rows and columns.
130
+
131
+ >>> df[['height', 'weight']] + 1.5
132
+ height weight
133
+ elk 3.0 501.5
134
+ moose 4.1 801.5
135
+
136
+ Each element of a list is added to a column of the DataFrame, in order.
137
+
138
+ >>> df[['height', 'weight']] + [0.5, 1.5]
139
+ height weight
140
+ elk 2.0 501.5
141
+ moose 3.1 801.5
142
+
143
+ Keys of a dictionary are aligned to the DataFrame, based on column names;
144
+ each value in the dictionary is added to the corresponding column.
145
+
146
+ >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5}
147
+ height weight
148
+ elk 2.0 501.5
149
+ moose 3.1 801.5
150
+
151
+ When `other` is a :class:`Series`, the index of `other` is aligned with the
152
+ columns of the DataFrame.
153
+
154
+ >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height'])
155
+ >>> df[['height', 'weight']] + s1
156
+ height weight
157
+ elk 3.0 500.5
158
+ moose 4.1 800.5
159
+
160
+ Even when the index of `other` is the same as the index of the DataFrame,
161
+ the :class:`Series` will not be reoriented. If index-wise alignment is desired,
162
+ :meth:`DataFrame.add` should be used with `axis='index'`.
163
+
164
+ >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose'])
165
+ >>> df[['height', 'weight']] + s2
166
+ elk height moose weight
167
+ elk NaN NaN NaN NaN
168
+ moose NaN NaN NaN NaN
169
+
170
+ >>> df[['height', 'weight']].add(s2, axis='index')
171
+ height weight
172
+ elk 2.0 500.5
173
+ moose 4.1 801.5
174
+
175
+ When `other` is a :class:`DataFrame`, both columns names and the
176
+ index are aligned.
177
+
178
+ >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]},
179
+ ... index=['elk', 'moose', 'deer'])
180
+ >>> df[['height', 'weight']] + other
181
+ height weight
182
+ deer NaN NaN
183
+ elk 1.7 NaN
184
+ moose 3.0 NaN
185
+ """
186
+ return self._arith_method(other, operator.add)
187
+
188
+ @unpack_zerodim_and_defer("__radd__")
189
+ def __radd__(self, other):
190
+ return self._arith_method(other, roperator.radd)
191
+
192
+ @unpack_zerodim_and_defer("__sub__")
193
+ def __sub__(self, other):
194
+ return self._arith_method(other, operator.sub)
195
+
196
+ @unpack_zerodim_and_defer("__rsub__")
197
+ def __rsub__(self, other):
198
+ return self._arith_method(other, roperator.rsub)
199
+
200
+ @unpack_zerodim_and_defer("__mul__")
201
+ def __mul__(self, other):
202
+ return self._arith_method(other, operator.mul)
203
+
204
+ @unpack_zerodim_and_defer("__rmul__")
205
+ def __rmul__(self, other):
206
+ return self._arith_method(other, roperator.rmul)
207
+
208
+ @unpack_zerodim_and_defer("__truediv__")
209
+ def __truediv__(self, other):
210
+ return self._arith_method(other, operator.truediv)
211
+
212
+ @unpack_zerodim_and_defer("__rtruediv__")
213
+ def __rtruediv__(self, other):
214
+ return self._arith_method(other, roperator.rtruediv)
215
+
216
+ @unpack_zerodim_and_defer("__floordiv__")
217
+ def __floordiv__(self, other):
218
+ return self._arith_method(other, operator.floordiv)
219
+
220
+ @unpack_zerodim_and_defer("__rfloordiv")
221
+ def __rfloordiv__(self, other):
222
+ return self._arith_method(other, roperator.rfloordiv)
223
+
224
+ @unpack_zerodim_and_defer("__mod__")
225
+ def __mod__(self, other):
226
+ return self._arith_method(other, operator.mod)
227
+
228
+ @unpack_zerodim_and_defer("__rmod__")
229
+ def __rmod__(self, other):
230
+ return self._arith_method(other, roperator.rmod)
231
+
232
+ @unpack_zerodim_and_defer("__divmod__")
233
+ def __divmod__(self, other):
234
+ return self._arith_method(other, divmod)
235
+
236
+ @unpack_zerodim_and_defer("__rdivmod__")
237
+ def __rdivmod__(self, other):
238
+ return self._arith_method(other, roperator.rdivmod)
239
+
240
+ @unpack_zerodim_and_defer("__pow__")
241
+ def __pow__(self, other):
242
+ return self._arith_method(other, operator.pow)
243
+
244
+ @unpack_zerodim_and_defer("__rpow__")
245
+ def __rpow__(self, other):
246
+ return self._arith_method(other, roperator.rpow)
247
+
248
+
249
+ # -----------------------------------------------------------------------------
250
+ # Helpers to implement __array_ufunc__
251
+
252
+
253
+ def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
254
+ """
255
+ Compatibility with numpy ufuncs.
256
+
257
+ See also
258
+ --------
259
+ numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
260
+ """
261
+ from pandas.core.frame import (
262
+ DataFrame,
263
+ Series,
264
+ )
265
+ from pandas.core.generic import NDFrame
266
+ from pandas.core.internals import (
267
+ ArrayManager,
268
+ BlockManager,
269
+ )
270
+
271
+ cls = type(self)
272
+
273
+ kwargs = _standardize_out_kwarg(**kwargs)
274
+
275
+ # for binary ops, use our custom dunder methods
276
+ result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs)
277
+ if result is not NotImplemented:
278
+ return result
279
+
280
+ # Determine if we should defer.
281
+ no_defer = (
282
+ np.ndarray.__array_ufunc__,
283
+ cls.__array_ufunc__,
284
+ )
285
+
286
+ for item in inputs:
287
+ higher_priority = (
288
+ hasattr(item, "__array_priority__")
289
+ and item.__array_priority__ > self.__array_priority__
290
+ )
291
+ has_array_ufunc = (
292
+ hasattr(item, "__array_ufunc__")
293
+ and type(item).__array_ufunc__ not in no_defer
294
+ and not isinstance(item, self._HANDLED_TYPES)
295
+ )
296
+ if higher_priority or has_array_ufunc:
297
+ return NotImplemented
298
+
299
+ # align all the inputs.
300
+ types = tuple(type(x) for x in inputs)
301
+ alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)]
302
+
303
+ if len(alignable) > 1:
304
+ # This triggers alignment.
305
+ # At the moment, there aren't any ufuncs with more than two inputs
306
+ # so this ends up just being x1.index | x2.index, but we write
307
+ # it to handle *args.
308
+ set_types = set(types)
309
+ if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types):
310
+ # We currently don't handle ufunc(DataFrame, Series)
311
+ # well. Previously this raised an internal ValueError. We might
312
+ # support it someday, so raise a NotImplementedError.
313
+ raise NotImplementedError(
314
+ f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs."
315
+ )
316
+ axes = self.axes
317
+ for obj in alignable[1:]:
318
+ # this relies on the fact that we aren't handling mixed
319
+ # series / frame ufuncs.
320
+ for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)):
321
+ axes[i] = ax1.union(ax2)
322
+
323
+ reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes))
324
+ inputs = tuple(
325
+ x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x
326
+ for x, t in zip(inputs, types)
327
+ )
328
+ else:
329
+ reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes))
330
+
331
+ if self.ndim == 1:
332
+ names = [getattr(x, "name") for x in inputs if hasattr(x, "name")]
333
+ name = names[0] if len(set(names)) == 1 else None
334
+ reconstruct_kwargs = {"name": name}
335
+ else:
336
+ reconstruct_kwargs = {}
337
+
338
+ def reconstruct(result):
339
+ if ufunc.nout > 1:
340
+ # np.modf, np.frexp, np.divmod
341
+ return tuple(_reconstruct(x) for x in result)
342
+
343
+ return _reconstruct(result)
344
+
345
+ def _reconstruct(result):
346
+ if lib.is_scalar(result):
347
+ return result
348
+
349
+ if result.ndim != self.ndim:
350
+ if method == "outer":
351
+ raise NotImplementedError
352
+ return result
353
+ if isinstance(result, (BlockManager, ArrayManager)):
354
+ # we went through BlockManager.apply e.g. np.sqrt
355
+ result = self._constructor_from_mgr(result, axes=result.axes)
356
+ else:
357
+ # we converted an array, lost our axes
358
+ result = self._constructor(
359
+ result, **reconstruct_axes, **reconstruct_kwargs, copy=False
360
+ )
361
+ # TODO: When we support multiple values in __finalize__, this
362
+ # should pass alignable to `__finalize__` instead of self.
363
+ # Then `np.add(a, b)` would consider attrs from both a and b
364
+ # when a and b are NDFrames.
365
+ if len(alignable) == 1:
366
+ result = result.__finalize__(self)
367
+ return result
368
+
369
+ if "out" in kwargs:
370
+ # e.g. test_multiindex_get_loc
371
+ result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs)
372
+ return reconstruct(result)
373
+
374
+ if method == "reduce":
375
+ # e.g. test.series.test_ufunc.test_reduce
376
+ result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs)
377
+ if result is not NotImplemented:
378
+ return result
379
+
380
+ # We still get here with kwargs `axis` for e.g. np.maximum.accumulate
381
+ # and `dtype` and `keepdims` for np.ptp
382
+
383
+ if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1):
384
+ # Just give up on preserving types in the complex case.
385
+ # In theory we could preserve them in these cases:
386
+ # * nout>1 is doable if BlockManager.apply took nout and
387
+ # returned a Tuple[BlockManager].
388
+ # * len(inputs) > 1 is doable when we know that we have
389
+ # aligned blocks / dtypes.
390
+
391
+ # e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add
392
+ inputs = tuple(np.asarray(x) for x in inputs)
393
+ # Note: we can't use default_array_ufunc here bc reindexing means
394
+ # that `self` may not be among `inputs`
395
+ result = getattr(ufunc, method)(*inputs, **kwargs)
396
+ elif self.ndim == 1:
397
+ # ufunc(series, ...)
398
+ inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
399
+ result = getattr(ufunc, method)(*inputs, **kwargs)
400
+ else:
401
+ # ufunc(dataframe)
402
+ if method == "__call__" and not kwargs:
403
+ # for np.<ufunc>(..) calls
404
+ # kwargs cannot necessarily be handled block-by-block, so only
405
+ # take this path if there are no kwargs
406
+ mgr = inputs[0]._mgr
407
+ result = mgr.apply(getattr(ufunc, method))
408
+ else:
409
+ # otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
410
+ # Those can have an axis keyword and thus can't be called block-by-block
411
+ result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs)
412
+ # e.g. np.negative (only one reached), with "where" and "out" in kwargs
413
+
414
+ result = reconstruct(result)
415
+ return result
416
+
417
+
418
+ def _standardize_out_kwarg(**kwargs) -> dict:
419
+ """
420
+ If kwargs contain "out1" and "out2", replace that with a tuple "out"
421
+
422
+ np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or
423
+ `out1=out1, out2=out2`
424
+ """
425
+ if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs:
426
+ out1 = kwargs.pop("out1")
427
+ out2 = kwargs.pop("out2")
428
+ out = (out1, out2)
429
+ kwargs["out"] = out
430
+ return kwargs
431
+
432
+
433
+ def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
434
+ """
435
+ If we have an `out` keyword, then call the ufunc without `out` and then
436
+ set the result into the given `out`.
437
+ """
438
+
439
+ # Note: we assume _standardize_out_kwarg has already been called.
440
+ out = kwargs.pop("out")
441
+ where = kwargs.pop("where", None)
442
+
443
+ result = getattr(ufunc, method)(*inputs, **kwargs)
444
+
445
+ if result is NotImplemented:
446
+ return NotImplemented
447
+
448
+ if isinstance(result, tuple):
449
+ # i.e. np.divmod, np.modf, np.frexp
450
+ if not isinstance(out, tuple) or len(out) != len(result):
451
+ raise NotImplementedError
452
+
453
+ for arr, res in zip(out, result):
454
+ _assign_where(arr, res, where)
455
+
456
+ return out
457
+
458
+ if isinstance(out, tuple):
459
+ if len(out) == 1:
460
+ out = out[0]
461
+ else:
462
+ raise NotImplementedError
463
+
464
+ _assign_where(out, result, where)
465
+ return out
466
+
467
+
468
+ def _assign_where(out, result, where) -> None:
469
+ """
470
+ Set a ufunc result into 'out', masking with a 'where' argument if necessary.
471
+ """
472
+ if where is None:
473
+ # no 'where' arg passed to ufunc
474
+ out[:] = result
475
+ else:
476
+ np.putmask(out, where, result)
477
+
478
+
479
+ def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
480
+ """
481
+ Fallback to the behavior we would get if we did not define __array_ufunc__.
482
+
483
+ Notes
484
+ -----
485
+ We are assuming that `self` is among `inputs`.
486
+ """
487
+ if not any(x is self for x in inputs):
488
+ raise NotImplementedError
489
+
490
+ new_inputs = [x if x is not self else np.asarray(x) for x in inputs]
491
+
492
+ return getattr(ufunc, method)(*new_inputs, **kwargs)
493
+
494
+
495
+ def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
496
+ """
497
+ Dispatch ufunc reductions to self's reduction methods.
498
+ """
499
+ assert method == "reduce"
500
+
501
+ if len(inputs) != 1 or inputs[0] is not self:
502
+ return NotImplemented
503
+
504
+ if ufunc.__name__ not in REDUCTION_ALIASES:
505
+ return NotImplemented
506
+
507
+ method_name = REDUCTION_ALIASES[ufunc.__name__]
508
+
509
+ # NB: we are assuming that min/max represent minimum/maximum methods,
510
+ # which would not be accurate for e.g. Timestamp.min
511
+ if not hasattr(self, method_name):
512
+ return NotImplemented
513
+
514
+ if self.ndim > 1:
515
+ if isinstance(self, ABCNDFrame):
516
+ # TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA
517
+ kwargs["numeric_only"] = False
518
+
519
+ if "axis" not in kwargs:
520
+ # For DataFrame reductions we don't want the default axis=0
521
+ # Note: np.min is not a ufunc, but uses array_function_dispatch,
522
+ # so calls DataFrame.min (without ever getting here) with the np.min
523
+ # default of axis=None, which DataFrame.min catches and changes to axis=0.
524
+ # np.minimum.reduce(df) gets here bc axis is not in kwargs,
525
+ # so we set axis=0 to match the behavior of np.minimum.reduce(df.values)
526
+ kwargs["axis"] = 0
527
+
528
+ # By default, numpy's reductions do not skip NaNs, so we have to
529
+ # pass skipna=False
530
+ return getattr(self, method_name)(skipna=False, **kwargs)
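A brief sketch of the reduction dispatch above (assuming a plain float64 Series): ``np.add.reduce`` is aliased to ``sum`` and forwarded with ``skipna=False``, so NaN handling matches numpy on the raw values rather than pandas' own default.

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, np.nan])
# Routed through dispatch_reduction_ufunc: "add" -> Series.sum(skipna=False)
print(np.add.reduce(s))  # nan, same as np.add.reduce(s.to_numpy())
print(s.sum())           # 3.0, because pandas' default is skipna=True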
venv/lib/python3.10/site-packages/pandas/core/arrays/__init__.py ADDED
@@ -0,0 +1,43 @@
1
+ from pandas.core.arrays.arrow import ArrowExtensionArray
2
+ from pandas.core.arrays.base import (
3
+ ExtensionArray,
4
+ ExtensionOpsMixin,
5
+ ExtensionScalarOpsMixin,
6
+ )
7
+ from pandas.core.arrays.boolean import BooleanArray
8
+ from pandas.core.arrays.categorical import Categorical
9
+ from pandas.core.arrays.datetimes import DatetimeArray
10
+ from pandas.core.arrays.floating import FloatingArray
11
+ from pandas.core.arrays.integer import IntegerArray
12
+ from pandas.core.arrays.interval import IntervalArray
13
+ from pandas.core.arrays.masked import BaseMaskedArray
14
+ from pandas.core.arrays.numpy_ import NumpyExtensionArray
15
+ from pandas.core.arrays.period import (
16
+ PeriodArray,
17
+ period_array,
18
+ )
19
+ from pandas.core.arrays.sparse import SparseArray
20
+ from pandas.core.arrays.string_ import StringArray
21
+ from pandas.core.arrays.string_arrow import ArrowStringArray
22
+ from pandas.core.arrays.timedeltas import TimedeltaArray
23
+
24
+ __all__ = [
25
+ "ArrowExtensionArray",
26
+ "ExtensionArray",
27
+ "ExtensionOpsMixin",
28
+ "ExtensionScalarOpsMixin",
29
+ "ArrowStringArray",
30
+ "BaseMaskedArray",
31
+ "BooleanArray",
32
+ "Categorical",
33
+ "DatetimeArray",
34
+ "FloatingArray",
35
+ "IntegerArray",
36
+ "IntervalArray",
37
+ "NumpyExtensionArray",
38
+ "PeriodArray",
39
+ "period_array",
40
+ "SparseArray",
41
+ "StringArray",
42
+ "TimedeltaArray",
43
+ ]
venv/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py ADDED
@@ -0,0 +1,84 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Literal
4
+
5
+ import numpy as np
6
+
7
+ from pandas.compat import pa_version_under10p1
8
+
9
+ if not pa_version_under10p1:
10
+ import pyarrow as pa
11
+ import pyarrow.compute as pc
12
+
13
+
14
+ class ArrowStringArrayMixin:
15
+ _pa_array = None
16
+
17
+ def __init__(self, *args, **kwargs) -> None:
18
+ raise NotImplementedError
19
+
20
+ def _str_pad(
21
+ self,
22
+ width: int,
23
+ side: Literal["left", "right", "both"] = "left",
24
+ fillchar: str = " ",
25
+ ):
26
+ if side == "left":
27
+ pa_pad = pc.utf8_lpad
28
+ elif side == "right":
29
+ pa_pad = pc.utf8_rpad
30
+ elif side == "both":
31
+ pa_pad = pc.utf8_center
32
+ else:
33
+ raise ValueError(
34
+ f"Invalid side: {side}. Side must be one of 'left', 'right', 'both'"
35
+ )
36
+ return type(self)(pa_pad(self._pa_array, width=width, padding=fillchar))
37
+
38
+ def _str_get(self, i: int):
39
+ lengths = pc.utf8_length(self._pa_array)
40
+ if i >= 0:
41
+ out_of_bounds = pc.greater_equal(i, lengths)
42
+ start = i
43
+ stop = i + 1
44
+ step = 1
45
+ else:
46
+ out_of_bounds = pc.greater(-i, lengths)
47
+ start = i
48
+ stop = i - 1
49
+ step = -1
50
+ not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True))
51
+ selected = pc.utf8_slice_codeunits(
52
+ self._pa_array, start=start, stop=stop, step=step
53
+ )
54
+ null_value = pa.scalar(
55
+ None, type=self._pa_array.type # type: ignore[attr-defined]
56
+ )
57
+ result = pc.if_else(not_out_of_bounds, selected, null_value)
58
+ return type(self)(result)
59
+
60
+ def _str_slice_replace(
61
+ self, start: int | None = None, stop: int | None = None, repl: str | None = None
62
+ ):
63
+ if repl is None:
64
+ repl = ""
65
+ if start is None:
66
+ start = 0
67
+ if stop is None:
68
+ stop = np.iinfo(np.int64).max
69
+ return type(self)(pc.utf8_replace_slice(self._pa_array, start, stop, repl))
70
+
71
+ def _str_capitalize(self):
72
+ return type(self)(pc.utf8_capitalize(self._pa_array))
73
+
74
+ def _str_title(self):
75
+ return type(self)(pc.utf8_title(self._pa_array))
76
+
77
+ def _str_swapcase(self):
78
+ return type(self)(pc.utf8_swapcase(self._pa_array))
79
+
80
+ def _str_removesuffix(self, suffix: str):
81
+ ends_with = pc.ends_with(self._pa_array, pattern=suffix)
82
+ removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix))
83
+ result = pc.if_else(ends_with, removed, self._pa_array)
84
+ return type(self)(result)
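A quick sketch of the same removesuffix recipe the mixin uses, written directly against pyarrow.compute (assumes a pyarrow recent enough to pass the pa_version_under10p1 guard at the top of the file):

import pyarrow as pa
import pyarrow.compute as pc

arr = pa.chunked_array([["reading", "heading", "dog"]])
suffix = "ing"
ends_with = pc.ends_with(arr, pattern=suffix)
removed = pc.utf8_slice_codeunits(arr, 0, stop=-len(suffix))
# keep the original string where the suffix is absent
print(pc.if_else(ends_with, removed, arr))  # ["read", "head", "dog"]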
venv/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py ADDED
@@ -0,0 +1,547 @@
1
+ from __future__ import annotations
2
+
3
+ from functools import wraps
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Any,
7
+ Literal,
8
+ cast,
9
+ overload,
10
+ )
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas._libs.arrays import NDArrayBacked
16
+ from pandas._libs.tslibs import is_supported_dtype
17
+ from pandas._typing import (
18
+ ArrayLike,
19
+ AxisInt,
20
+ Dtype,
21
+ F,
22
+ FillnaOptions,
23
+ PositionalIndexer2D,
24
+ PositionalIndexerTuple,
25
+ ScalarIndexer,
26
+ Self,
27
+ SequenceIndexer,
28
+ Shape,
29
+ TakeIndexer,
30
+ npt,
31
+ )
32
+ from pandas.errors import AbstractMethodError
33
+ from pandas.util._decorators import doc
34
+ from pandas.util._validators import (
35
+ validate_bool_kwarg,
36
+ validate_fillna_kwargs,
37
+ validate_insert_loc,
38
+ )
39
+
40
+ from pandas.core.dtypes.common import pandas_dtype
41
+ from pandas.core.dtypes.dtypes import (
42
+ DatetimeTZDtype,
43
+ ExtensionDtype,
44
+ PeriodDtype,
45
+ )
46
+ from pandas.core.dtypes.missing import array_equivalent
47
+
48
+ from pandas.core import missing
49
+ from pandas.core.algorithms import (
50
+ take,
51
+ unique,
52
+ value_counts_internal as value_counts,
53
+ )
54
+ from pandas.core.array_algos.quantile import quantile_with_mask
55
+ from pandas.core.array_algos.transforms import shift
56
+ from pandas.core.arrays.base import ExtensionArray
57
+ from pandas.core.construction import extract_array
58
+ from pandas.core.indexers import check_array_indexer
59
+ from pandas.core.sorting import nargminmax
60
+
61
+ if TYPE_CHECKING:
62
+ from collections.abc import Sequence
63
+
64
+ from pandas._typing import (
65
+ NumpySorter,
66
+ NumpyValueArrayLike,
67
+ )
68
+
69
+ from pandas import Series
70
+
71
+
72
+ def ravel_compat(meth: F) -> F:
73
+ """
74
+ Decorator to ravel a 2D array before passing it to a cython operation,
75
+ then reshape the result to our own shape.
76
+ """
77
+
78
+ @wraps(meth)
79
+ def method(self, *args, **kwargs):
80
+ if self.ndim == 1:
81
+ return meth(self, *args, **kwargs)
82
+
83
+ flags = self._ndarray.flags
84
+ flat = self.ravel("K")
85
+ result = meth(flat, *args, **kwargs)
86
+ order = "F" if flags.f_contiguous else "C"
87
+ return result.reshape(self.shape, order=order)
88
+
89
+ return cast(F, method)
90
+
91
+
92
+ class NDArrayBackedExtensionArray(NDArrayBacked, ExtensionArray):
93
+ """
94
+ ExtensionArray that is backed by a single NumPy ndarray.
95
+ """
96
+
97
+ _ndarray: np.ndarray
98
+
99
+ # scalar used to denote NA value inside our self._ndarray, e.g. -1
100
+ # for Categorical, iNaT for Period. Outside of object dtype,
101
+ # self.isna() should be exactly locations in self._ndarray with
102
+ # _internal_fill_value.
103
+ _internal_fill_value: Any
104
+
105
+ def _box_func(self, x):
106
+ """
107
+ Wrap numpy type in our dtype.type if necessary.
108
+ """
109
+ return x
110
+
111
+ def _validate_scalar(self, value):
112
+ # used by NDArrayBackedExtensionIndex.insert
113
+ raise AbstractMethodError(self)
114
+
115
+ # ------------------------------------------------------------------------
116
+
117
+ def view(self, dtype: Dtype | None = None) -> ArrayLike:
118
+ # We handle datetime64, datetime64tz, timedelta64, and period
119
+ # dtypes here. Everything else we pass through to the underlying
120
+ # ndarray.
121
+ if dtype is None or dtype is self.dtype:
122
+ return self._from_backing_data(self._ndarray)
123
+
124
+ if isinstance(dtype, type):
125
+ # we sometimes pass non-dtype objects, e.g np.ndarray;
126
+ # pass those through to the underlying ndarray
127
+ return self._ndarray.view(dtype)
128
+
129
+ dtype = pandas_dtype(dtype)
130
+ arr = self._ndarray
131
+
132
+ if isinstance(dtype, PeriodDtype):
133
+ cls = dtype.construct_array_type()
134
+ return cls(arr.view("i8"), dtype=dtype)
135
+ elif isinstance(dtype, DatetimeTZDtype):
136
+ dt_cls = dtype.construct_array_type()
137
+ dt64_values = arr.view(f"M8[{dtype.unit}]")
138
+ return dt_cls._simple_new(dt64_values, dtype=dtype)
139
+ elif lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
140
+ from pandas.core.arrays import DatetimeArray
141
+
142
+ dt64_values = arr.view(dtype)
143
+ return DatetimeArray._simple_new(dt64_values, dtype=dtype)
144
+
145
+ elif lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
146
+ from pandas.core.arrays import TimedeltaArray
147
+
148
+ td64_values = arr.view(dtype)
149
+ return TimedeltaArray._simple_new(td64_values, dtype=dtype)
150
+
151
+ # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible
152
+ # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None,
153
+ # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,
154
+ # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
155
+ return arr.view(dtype=dtype) # type: ignore[arg-type]
156
+
157
+ def take(
158
+ self,
159
+ indices: TakeIndexer,
160
+ *,
161
+ allow_fill: bool = False,
162
+ fill_value: Any = None,
163
+ axis: AxisInt = 0,
164
+ ) -> Self:
165
+ if allow_fill:
166
+ fill_value = self._validate_scalar(fill_value)
167
+
168
+ new_data = take(
169
+ self._ndarray,
170
+ indices,
171
+ allow_fill=allow_fill,
172
+ fill_value=fill_value,
173
+ axis=axis,
174
+ )
175
+ return self._from_backing_data(new_data)
176
+
177
+ # ------------------------------------------------------------------------
178
+
179
+ def equals(self, other) -> bool:
180
+ if type(self) is not type(other):
181
+ return False
182
+ if self.dtype != other.dtype:
183
+ return False
184
+ return bool(array_equivalent(self._ndarray, other._ndarray, dtype_equal=True))
185
+
186
+ @classmethod
187
+ def _from_factorized(cls, values, original):
188
+ assert values.dtype == original._ndarray.dtype
189
+ return original._from_backing_data(values)
190
+
191
+ def _values_for_argsort(self) -> np.ndarray:
192
+ return self._ndarray
193
+
194
+ def _values_for_factorize(self):
195
+ return self._ndarray, self._internal_fill_value
196
+
197
+ def _hash_pandas_object(
198
+ self, *, encoding: str, hash_key: str, categorize: bool
199
+ ) -> npt.NDArray[np.uint64]:
200
+ from pandas.core.util.hashing import hash_array
201
+
202
+ values = self._ndarray
203
+ return hash_array(
204
+ values, encoding=encoding, hash_key=hash_key, categorize=categorize
205
+ )
206
+
207
+ # Signature of "argmin" incompatible with supertype "ExtensionArray"
208
+ def argmin(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override]
209
+ # override base class by adding axis keyword
210
+ validate_bool_kwarg(skipna, "skipna")
211
+ if not skipna and self._hasna:
212
+ raise NotImplementedError
213
+ return nargminmax(self, "argmin", axis=axis)
214
+
215
+ # Signature of "argmax" incompatible with supertype "ExtensionArray"
216
+ def argmax(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override]
217
+ # override base class by adding axis keyword
218
+ validate_bool_kwarg(skipna, "skipna")
219
+ if not skipna and self._hasna:
220
+ raise NotImplementedError
221
+ return nargminmax(self, "argmax", axis=axis)
222
+
223
+ def unique(self) -> Self:
224
+ new_data = unique(self._ndarray)
225
+ return self._from_backing_data(new_data)
226
+
227
+ @classmethod
228
+ @doc(ExtensionArray._concat_same_type)
229
+ def _concat_same_type(
230
+ cls,
231
+ to_concat: Sequence[Self],
232
+ axis: AxisInt = 0,
233
+ ) -> Self:
234
+ if not lib.dtypes_all_equal([x.dtype for x in to_concat]):
235
+ dtypes = {str(x.dtype) for x in to_concat}
236
+ raise ValueError("to_concat must have the same dtype", dtypes)
237
+
238
+ return super()._concat_same_type(to_concat, axis=axis)
239
+
240
+ @doc(ExtensionArray.searchsorted)
241
+ def searchsorted(
242
+ self,
243
+ value: NumpyValueArrayLike | ExtensionArray,
244
+ side: Literal["left", "right"] = "left",
245
+ sorter: NumpySorter | None = None,
246
+ ) -> npt.NDArray[np.intp] | np.intp:
247
+ npvalue = self._validate_setitem_value(value)
248
+ return self._ndarray.searchsorted(npvalue, side=side, sorter=sorter)
249
+
250
+ @doc(ExtensionArray.shift)
251
+ def shift(self, periods: int = 1, fill_value=None):
252
+ # NB: shift is always along axis=0
253
+ axis = 0
254
+ fill_value = self._validate_scalar(fill_value)
255
+ new_values = shift(self._ndarray, periods, axis, fill_value)
256
+
257
+ return self._from_backing_data(new_values)
258
+
259
+ def __setitem__(self, key, value) -> None:
260
+ key = check_array_indexer(self, key)
261
+ value = self._validate_setitem_value(value)
262
+ self._ndarray[key] = value
263
+
264
+ def _validate_setitem_value(self, value):
265
+ return value
266
+
267
+ @overload
268
+ def __getitem__(self, key: ScalarIndexer) -> Any:
269
+ ...
270
+
271
+ @overload
272
+ def __getitem__(
273
+ self,
274
+ key: SequenceIndexer | PositionalIndexerTuple,
275
+ ) -> Self:
276
+ ...
277
+
278
+ def __getitem__(
279
+ self,
280
+ key: PositionalIndexer2D,
281
+ ) -> Self | Any:
282
+ if lib.is_integer(key):
283
+ # fast-path
284
+ result = self._ndarray[key]
285
+ if self.ndim == 1:
286
+ return self._box_func(result)
287
+ return self._from_backing_data(result)
288
+
289
+ # error: Incompatible types in assignment (expression has type "ExtensionArray",
290
+ # variable has type "Union[int, slice, ndarray]")
291
+ key = extract_array(key, extract_numpy=True) # type: ignore[assignment]
292
+ key = check_array_indexer(self, key)
293
+ result = self._ndarray[key]
294
+ if lib.is_scalar(result):
295
+ return self._box_func(result)
296
+
297
+ result = self._from_backing_data(result)
298
+ return result
299
+
300
+ def _fill_mask_inplace(
301
+ self, method: str, limit: int | None, mask: npt.NDArray[np.bool_]
302
+ ) -> None:
303
+ # (for now) when self.ndim == 2, we assume axis=0
304
+ func = missing.get_fill_func(method, ndim=self.ndim)
305
+ func(self._ndarray.T, limit=limit, mask=mask.T)
306
+
307
+ def _pad_or_backfill(
308
+ self,
309
+ *,
310
+ method: FillnaOptions,
311
+ limit: int | None = None,
312
+ limit_area: Literal["inside", "outside"] | None = None,
313
+ copy: bool = True,
314
+ ) -> Self:
315
+ mask = self.isna()
316
+ if mask.any():
317
+ # (for now) when self.ndim == 2, we assume axis=0
318
+ func = missing.get_fill_func(method, ndim=self.ndim)
319
+
320
+ npvalues = self._ndarray.T
321
+ if copy:
322
+ npvalues = npvalues.copy()
323
+ func(npvalues, limit=limit, limit_area=limit_area, mask=mask.T)
324
+ npvalues = npvalues.T
325
+
326
+ if copy:
327
+ new_values = self._from_backing_data(npvalues)
328
+ else:
329
+ new_values = self
330
+
331
+ else:
332
+ if copy:
333
+ new_values = self.copy()
334
+ else:
335
+ new_values = self
336
+ return new_values
337
+
338
+ @doc(ExtensionArray.fillna)
339
+ def fillna(
340
+ self, value=None, method=None, limit: int | None = None, copy: bool = True
341
+ ) -> Self:
342
+ value, method = validate_fillna_kwargs(
343
+ value, method, validate_scalar_dict_value=False
344
+ )
345
+
346
+ mask = self.isna()
347
+ # error: Argument 2 to "check_value_size" has incompatible type
348
+ # "ExtensionArray"; expected "ndarray"
349
+ value = missing.check_value_size(
350
+ value, mask, len(self) # type: ignore[arg-type]
351
+ )
352
+
353
+ if mask.any():
354
+ if method is not None:
355
+ # (for now) when self.ndim == 2, we assume axis=0
356
+ func = missing.get_fill_func(method, ndim=self.ndim)
357
+ npvalues = self._ndarray.T
358
+ if copy:
359
+ npvalues = npvalues.copy()
360
+ func(npvalues, limit=limit, mask=mask.T)
361
+ npvalues = npvalues.T
362
+
363
+ # TODO: NumpyExtensionArray didn't used to copy, need tests
364
+ # for this
365
+ new_values = self._from_backing_data(npvalues)
366
+ else:
367
+ # fill with value
368
+ if copy:
369
+ new_values = self.copy()
370
+ else:
371
+ new_values = self[:]
372
+ new_values[mask] = value
373
+ else:
374
+ # We validate the fill_value even if there is nothing to fill
375
+ if value is not None:
376
+ self._validate_setitem_value(value)
377
+
378
+ if not copy:
379
+ new_values = self[:]
380
+ else:
381
+ new_values = self.copy()
382
+ return new_values
383
+
384
+ # ------------------------------------------------------------------------
385
+ # Reductions
386
+
387
+ def _wrap_reduction_result(self, axis: AxisInt | None, result):
388
+ if axis is None or self.ndim == 1:
389
+ return self._box_func(result)
390
+ return self._from_backing_data(result)
391
+
392
+ # ------------------------------------------------------------------------
393
+ # __array_function__ methods
394
+
395
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
396
+ """
397
+ Analogue to np.putmask(self, mask, value)
398
+
399
+ Parameters
400
+ ----------
401
+ mask : np.ndarray[bool]
402
+ value : scalar or listlike
403
+
404
+ Raises
405
+ ------
406
+ TypeError
407
+ If value cannot be cast to self.dtype.
408
+ """
409
+ value = self._validate_setitem_value(value)
410
+
411
+ np.putmask(self._ndarray, mask, value)
412
+
413
+ def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self:
414
+ """
415
+ Analogue to np.where(mask, self, value)
416
+
417
+ Parameters
418
+ ----------
419
+ mask : np.ndarray[bool]
420
+ value : scalar or listlike
421
+
422
+ Raises
423
+ ------
424
+ TypeError
425
+ If value cannot be cast to self.dtype.
426
+ """
427
+ value = self._validate_setitem_value(value)
428
+
429
+ res_values = np.where(mask, self._ndarray, value)
430
+ if res_values.dtype != self._ndarray.dtype:
431
+ raise AssertionError(
432
+ # GH#56410
433
+ "Something has gone wrong, please report a bug at "
434
+ "github.com/pandas-dev/pandas/"
435
+ )
436
+ return self._from_backing_data(res_values)
437
+
438
+ # ------------------------------------------------------------------------
439
+ # Index compat methods
440
+
441
+ def insert(self, loc: int, item) -> Self:
442
+ """
443
+ Make new ExtensionArray inserting new item at location. Follows
444
+ Python list.append semantics for negative values.
445
+
446
+ Parameters
447
+ ----------
448
+ loc : int
449
+ item : object
450
+
451
+ Returns
452
+ -------
453
+ type(self)
454
+ """
455
+ loc = validate_insert_loc(loc, len(self))
456
+
457
+ code = self._validate_scalar(item)
458
+
459
+ new_vals = np.concatenate(
460
+ (
461
+ self._ndarray[:loc],
462
+ np.asarray([code], dtype=self._ndarray.dtype),
463
+ self._ndarray[loc:],
464
+ )
465
+ )
466
+ return self._from_backing_data(new_vals)
467
+
468
+ # ------------------------------------------------------------------------
469
+ # Additional array methods
470
+ # These are not part of the EA API, but we implement them because
471
+ # pandas assumes they're there.
472
+
473
+ def value_counts(self, dropna: bool = True) -> Series:
474
+ """
475
+ Return a Series containing counts of unique values.
476
+
477
+ Parameters
478
+ ----------
479
+ dropna : bool, default True
480
+ Don't include counts of NA values.
481
+
482
+ Returns
483
+ -------
484
+ Series
485
+ """
486
+ if self.ndim != 1:
487
+ raise NotImplementedError
488
+
489
+ from pandas import (
490
+ Index,
491
+ Series,
492
+ )
493
+
494
+ if dropna:
495
+ # error: Unsupported operand type for ~ ("ExtensionArray")
496
+ values = self[~self.isna()]._ndarray # type: ignore[operator]
497
+ else:
498
+ values = self._ndarray
499
+
500
+ result = value_counts(values, sort=False, dropna=dropna)
501
+
502
+ index_arr = self._from_backing_data(np.asarray(result.index._data))
503
+ index = Index(index_arr, name=result.index.name)
504
+ return Series(result._values, index=index, name=result.name, copy=False)
505
+
506
+ def _quantile(
507
+ self,
508
+ qs: npt.NDArray[np.float64],
509
+ interpolation: str,
510
+ ) -> Self:
511
+ # TODO: disable for Categorical if not ordered?
512
+
513
+ mask = np.asarray(self.isna())
514
+ arr = self._ndarray
515
+ fill_value = self._internal_fill_value
516
+
517
+ res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)
518
+
519
+ res_values = self._cast_quantile_result(res_values)
520
+ return self._from_backing_data(res_values)
521
+
522
+ # TODO: see if we can share this with other dispatch-wrapping methods
523
+ def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray:
524
+ """
525
+ Cast the result of quantile_with_mask to an appropriate dtype
526
+ to pass to _from_backing_data in _quantile.
527
+ """
528
+ return res_values
529
+
530
+ # ------------------------------------------------------------------------
531
+ # numpy-like methods
532
+
533
+ @classmethod
534
+ def _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self:
535
+ """
536
+ Analogous to np.empty(shape, dtype=dtype)
537
+
538
+ Parameters
539
+ ----------
540
+ shape : tuple[int]
541
+ dtype : ExtensionDtype
542
+ """
543
+ # The base implementation uses a naive approach to find the dtype
544
+ # for the backing ndarray
545
+ arr = cls._from_sequence([], dtype=dtype)
546
+ backing = np.empty(shape, dtype=arr._ndarray.dtype)
547
+ return arr._from_backing_data(backing)
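As a concrete illustration of the backing-array pattern above (a sketch that pokes at private attributes): Categorical is one NDArrayBacked subclass, so take/unique/shift operate on its integer codes and re-wrap the result via _from_backing_data.

import pandas as pd

cat = pd.Categorical(["a", "b", "a", None])
# the backing codes array; -1 is the NA sentinel mentioned in the comment above
print(cat._ndarray)         # e.g. array([ 0,  1,  0, -1], dtype=int8)
print(cat.take([2, 0, 1]))  # ['a', 'a', 'b'], computed on the codes and re-wrapped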
venv/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py ADDED
@@ -0,0 +1,207 @@
1
+ """
2
+ Helper functions to generate range-like data for DatetimeArray
3
+ (and possibly TimedeltaArray/PeriodArray)
4
+ """
5
+ from __future__ import annotations
6
+
7
+ from typing import TYPE_CHECKING
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs.lib import i8max
12
+ from pandas._libs.tslibs import (
13
+ BaseOffset,
14
+ OutOfBoundsDatetime,
15
+ Timedelta,
16
+ Timestamp,
17
+ iNaT,
18
+ )
19
+
20
+ if TYPE_CHECKING:
21
+ from pandas._typing import npt
22
+
23
+
24
+ def generate_regular_range(
25
+ start: Timestamp | Timedelta | None,
26
+ end: Timestamp | Timedelta | None,
27
+ periods: int | None,
28
+ freq: BaseOffset,
29
+ unit: str = "ns",
30
+ ) -> npt.NDArray[np.intp]:
31
+ """
32
+ Generate a range of dates or timestamps with the spans between dates
33
+ described by the given `freq` DateOffset.
34
+
35
+ Parameters
36
+ ----------
37
+ start : Timedelta, Timestamp or None
38
+ First point of produced date range.
39
+ end : Timedelta, Timestamp or None
40
+ Last point of produced date range.
41
+ periods : int or None
42
+ Number of periods in produced date range.
43
+ freq : Tick
44
+ Describes space between dates in produced date range.
45
+ unit : str, default "ns"
46
+ The resolution the output is meant to represent.
47
+
48
+ Returns
49
+ -------
50
+ ndarray[np.int64]
51
+ Representing the given resolution.
52
+ """
53
+ istart = start._value if start is not None else None
54
+ iend = end._value if end is not None else None
55
+ freq.nanos # raises if non-fixed frequency
56
+ td = Timedelta(freq)
57
+ b: int
58
+ e: int
59
+ try:
60
+ td = td.as_unit(unit, round_ok=False)
61
+ except ValueError as err:
62
+ raise ValueError(
63
+ f"freq={freq} is incompatible with unit={unit}. "
64
+ "Use a lower freq or a higher unit instead."
65
+ ) from err
66
+ stride = int(td._value)
67
+
68
+ if periods is None and istart is not None and iend is not None:
69
+ b = istart
70
+ # cannot just use e = Timestamp(end) + 1 because arange breaks when
71
+ # stride is too large, see GH10887
72
+ e = b + (iend - b) // stride * stride + stride // 2 + 1
73
+ elif istart is not None and periods is not None:
74
+ b = istart
75
+ e = _generate_range_overflow_safe(b, periods, stride, side="start")
76
+ elif iend is not None and periods is not None:
77
+ e = iend + stride
78
+ b = _generate_range_overflow_safe(e, periods, stride, side="end")
79
+ else:
80
+ raise ValueError(
81
+ "at least 'start' or 'end' should be specified if a 'period' is given."
82
+ )
83
+
84
+ with np.errstate(over="raise"):
85
+ # If the range is sufficiently large, np.arange may overflow
86
+ # and incorrectly return an empty array if not caught.
87
+ try:
88
+ values = np.arange(b, e, stride, dtype=np.int64)
89
+ except FloatingPointError:
90
+ xdr = [b]
91
+ while xdr[-1] != e:
92
+ xdr.append(xdr[-1] + stride)
93
+ values = np.array(xdr[:-1], dtype=np.int64)
94
+ return values
95
+
96
+
97
+ def _generate_range_overflow_safe(
98
+ endpoint: int, periods: int, stride: int, side: str = "start"
99
+ ) -> int:
100
+ """
101
+ Calculate the second endpoint for passing to np.arange, checking
102
+ to avoid an integer overflow. Catch OverflowError and re-raise
103
+ as OutOfBoundsDatetime.
104
+
105
+ Parameters
106
+ ----------
107
+ endpoint : int
108
+ nanosecond timestamp of the known endpoint of the desired range
109
+ periods : int
110
+ number of periods in the desired range
111
+ stride : int
112
+ nanoseconds between periods in the desired range
113
+ side : {'start', 'end'}
114
+ which end of the range `endpoint` refers to
115
+
116
+ Returns
117
+ -------
118
+ other_end : int
119
+
120
+ Raises
121
+ ------
122
+ OutOfBoundsDatetime
123
+ """
124
+ # GH#14187 raise instead of incorrectly wrapping around
125
+ assert side in ["start", "end"]
126
+
127
+ i64max = np.uint64(i8max)
128
+ msg = f"Cannot generate range with {side}={endpoint} and periods={periods}"
129
+
130
+ with np.errstate(over="raise"):
131
+ # if periods * strides cannot be multiplied within the *uint64* bounds,
132
+ # we cannot salvage the operation by recursing, so raise
133
+ try:
134
+ addend = np.uint64(periods) * np.uint64(np.abs(stride))
135
+ except FloatingPointError as err:
136
+ raise OutOfBoundsDatetime(msg) from err
137
+
138
+ if np.abs(addend) <= i64max:
139
+ # relatively easy case without casting concerns
140
+ return _generate_range_overflow_safe_signed(endpoint, periods, stride, side)
141
+
142
+ elif (endpoint > 0 and side == "start" and stride > 0) or (
143
+ endpoint < 0 < stride and side == "end"
144
+ ):
145
+ # no chance of not-overflowing
146
+ raise OutOfBoundsDatetime(msg)
147
+
148
+ elif side == "end" and endpoint - stride <= i64max < endpoint:
149
+ # in _generate_regular_range we added `stride` thereby overflowing
150
+ # the bounds. Adjust to fix this.
151
+ return _generate_range_overflow_safe(
152
+ endpoint - stride, periods - 1, stride, side
153
+ )
154
+
155
+ # split into smaller pieces
156
+ mid_periods = periods // 2
157
+ remaining = periods - mid_periods
158
+ assert 0 < remaining < periods, (remaining, periods, endpoint, stride)
159
+
160
+ midpoint = int(_generate_range_overflow_safe(endpoint, mid_periods, stride, side))
161
+ return _generate_range_overflow_safe(midpoint, remaining, stride, side)
162
+
163
+
164
+ def _generate_range_overflow_safe_signed(
165
+ endpoint: int, periods: int, stride: int, side: str
166
+ ) -> int:
167
+ """
168
+ A special case for _generate_range_overflow_safe where `periods * stride`
169
+ can be calculated without overflowing int64 bounds.
170
+ """
171
+ assert side in ["start", "end"]
172
+ if side == "end":
173
+ stride *= -1
174
+
175
+ with np.errstate(over="raise"):
176
+ addend = np.int64(periods) * np.int64(stride)
177
+ try:
178
+ # easy case with no overflows
179
+ result = np.int64(endpoint) + addend
180
+ if result == iNaT:
181
+ # Putting this into a DatetimeArray/TimedeltaArray
182
+ # would incorrectly be interpreted as NaT
183
+ raise OverflowError
184
+ return int(result)
185
+ except (FloatingPointError, OverflowError):
186
+ # with endpoint negative and addend positive we risk
187
+ # FloatingPointError; with reversed signed we risk OverflowError
188
+ pass
189
+
190
+ # if stride and endpoint had opposite signs, then endpoint + addend
191
+ # should never overflow. so they must have the same signs
192
+ assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0)
193
+
194
+ if stride > 0:
195
+ # watch out for very special case in which we just slightly
196
+ # exceed implementation bounds, but when passing the result to
197
+ # np.arange will get a result slightly within the bounds
198
+
199
+ uresult = np.uint64(endpoint) + np.uint64(addend)
200
+ i64max = np.uint64(i8max)
201
+ assert uresult > i64max
202
+ if uresult <= i64max + np.uint64(stride):
203
+ return int(uresult)
204
+
205
+ raise OutOfBoundsDatetime(
206
+ f"Cannot generate range with {side}={endpoint} and periods={periods}"
207
+ )
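For context, a small sketch of what generate_regular_range ultimately produces for date_range: the index is backed by int64 epoch values spaced by the fixed frequency, i.e. exactly the arange computed above.

import pandas as pd

idx = pd.date_range("2024-01-01", periods=4, freq="D")
# consecutive i8 values differ by one day in nanoseconds
print(idx.asi8[1:] - idx.asi8[:-1])  # [86400000000000 86400000000000 86400000000000]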
venv/lib/python3.10/site-packages/pandas/core/arrays/_utils.py ADDED
@@ -0,0 +1,63 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ )
7
+
8
+ import numpy as np
9
+
10
+ from pandas._libs import lib
11
+ from pandas.errors import LossySetitemError
12
+
13
+ from pandas.core.dtypes.cast import np_can_hold_element
14
+ from pandas.core.dtypes.common import is_numeric_dtype
15
+
16
+ if TYPE_CHECKING:
17
+ from pandas._typing import (
18
+ ArrayLike,
19
+ npt,
20
+ )
21
+
22
+
23
+ def to_numpy_dtype_inference(
24
+ arr: ArrayLike, dtype: npt.DTypeLike | None, na_value, hasna: bool
25
+ ) -> tuple[npt.DTypeLike, Any]:
26
+ if dtype is None and is_numeric_dtype(arr.dtype):
27
+ dtype_given = False
28
+ if hasna:
29
+ if arr.dtype.kind == "b":
30
+ dtype = np.dtype(np.object_)
31
+ else:
32
+ if arr.dtype.kind in "iu":
33
+ dtype = np.dtype(np.float64)
34
+ else:
35
+ dtype = arr.dtype.numpy_dtype # type: ignore[union-attr]
36
+ if na_value is lib.no_default:
37
+ na_value = np.nan
38
+ else:
39
+ dtype = arr.dtype.numpy_dtype # type: ignore[union-attr]
40
+ elif dtype is not None:
41
+ dtype = np.dtype(dtype)
42
+ dtype_given = True
43
+ else:
44
+ dtype_given = True
45
+
46
+ if na_value is lib.no_default:
47
+ if dtype is None or not hasna:
48
+ na_value = arr.dtype.na_value
49
+ elif dtype.kind == "f": # type: ignore[union-attr]
50
+ na_value = np.nan
51
+ elif dtype.kind == "M": # type: ignore[union-attr]
52
+ na_value = np.datetime64("nat")
53
+ elif dtype.kind == "m": # type: ignore[union-attr]
54
+ na_value = np.timedelta64("nat")
55
+ else:
56
+ na_value = arr.dtype.na_value
57
+
58
+ if not dtype_given and hasna:
59
+ try:
60
+ np_can_hold_element(dtype, na_value) # type: ignore[arg-type]
61
+ except LossySetitemError:
62
+ dtype = np.dtype(np.object_)
63
+ return dtype, na_value
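A short sketch of how the inference above surfaces through masked-array to_numpy (assuming a pandas version that ships this helper): with missing values and no explicit dtype, integer data is widened to float64 with np.nan, while an explicit dtype plus na_value keeps the requested type.

import pandas as pd

arr = pd.array([1, 2, None], dtype="Int64")
print(arr.to_numpy())                           # array([ 1.,  2., nan]) -- float64 inferred
print(arr.to_numpy(dtype="int64", na_value=0))  # array([1, 2, 0]) -- explicit dtype honored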
venv/lib/python3.10/site-packages/pandas/core/arrays/base.py ADDED
@@ -0,0 +1,2588 @@
1
+ """
2
+ An interface for extending pandas with custom arrays.
3
+
4
+ .. warning::
5
+
6
+ This is an experimental API and subject to breaking changes
7
+ without warning.
8
+ """
9
+ from __future__ import annotations
10
+
11
+ import operator
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Any,
15
+ Callable,
16
+ ClassVar,
17
+ Literal,
18
+ cast,
19
+ overload,
20
+ )
21
+ import warnings
22
+
23
+ import numpy as np
24
+
25
+ from pandas._libs import (
26
+ algos as libalgos,
27
+ lib,
28
+ )
29
+ from pandas.compat import set_function_name
30
+ from pandas.compat.numpy import function as nv
31
+ from pandas.errors import AbstractMethodError
32
+ from pandas.util._decorators import (
33
+ Appender,
34
+ Substitution,
35
+ cache_readonly,
36
+ )
37
+ from pandas.util._exceptions import find_stack_level
38
+ from pandas.util._validators import (
39
+ validate_bool_kwarg,
40
+ validate_fillna_kwargs,
41
+ validate_insert_loc,
42
+ )
43
+
44
+ from pandas.core.dtypes.cast import maybe_cast_pointwise_result
45
+ from pandas.core.dtypes.common import (
46
+ is_list_like,
47
+ is_scalar,
48
+ pandas_dtype,
49
+ )
50
+ from pandas.core.dtypes.dtypes import ExtensionDtype
51
+ from pandas.core.dtypes.generic import (
52
+ ABCDataFrame,
53
+ ABCIndex,
54
+ ABCSeries,
55
+ )
56
+ from pandas.core.dtypes.missing import isna
57
+
58
+ from pandas.core import (
59
+ arraylike,
60
+ missing,
61
+ roperator,
62
+ )
63
+ from pandas.core.algorithms import (
64
+ duplicated,
65
+ factorize_array,
66
+ isin,
67
+ map_array,
68
+ mode,
69
+ rank,
70
+ unique,
71
+ )
72
+ from pandas.core.array_algos.quantile import quantile_with_mask
73
+ from pandas.core.missing import _fill_limit_area_1d
74
+ from pandas.core.sorting import (
75
+ nargminmax,
76
+ nargsort,
77
+ )
78
+
79
+ if TYPE_CHECKING:
80
+ from collections.abc import (
81
+ Iterator,
82
+ Sequence,
83
+ )
84
+
85
+ from pandas._typing import (
86
+ ArrayLike,
87
+ AstypeArg,
88
+ AxisInt,
89
+ Dtype,
90
+ DtypeObj,
91
+ FillnaOptions,
92
+ InterpolateOptions,
93
+ NumpySorter,
94
+ NumpyValueArrayLike,
95
+ PositionalIndexer,
96
+ ScalarIndexer,
97
+ Self,
98
+ SequenceIndexer,
99
+ Shape,
100
+ SortKind,
101
+ TakeIndexer,
102
+ npt,
103
+ )
104
+
105
+ from pandas import Index
106
+
107
+ _extension_array_shared_docs: dict[str, str] = {}
108
+
109
+
110
+ class ExtensionArray:
111
+ """
112
+ Abstract base class for custom 1-D array types.
113
+
114
+ pandas will recognize instances of this class as proper arrays
115
+ with a custom type and will not attempt to coerce them to objects. They
116
+ may be stored directly inside a :class:`DataFrame` or :class:`Series`.
117
+
118
+ Attributes
119
+ ----------
120
+ dtype
121
+ nbytes
122
+ ndim
123
+ shape
124
+
125
+ Methods
126
+ -------
127
+ argsort
128
+ astype
129
+ copy
130
+ dropna
131
+ duplicated
132
+ factorize
133
+ fillna
134
+ equals
135
+ insert
136
+ interpolate
137
+ isin
138
+ isna
139
+ ravel
140
+ repeat
141
+ searchsorted
142
+ shift
143
+ take
144
+ tolist
145
+ unique
146
+ view
147
+ _accumulate
148
+ _concat_same_type
149
+ _explode
150
+ _formatter
151
+ _from_factorized
152
+ _from_sequence
153
+ _from_sequence_of_strings
154
+ _hash_pandas_object
155
+ _pad_or_backfill
156
+ _reduce
157
+ _values_for_argsort
158
+ _values_for_factorize
159
+
160
+ Notes
161
+ -----
162
+ The interface includes the following abstract methods that must be
163
+ implemented by subclasses:
164
+
165
+ * _from_sequence
166
+ * _from_factorized
167
+ * __getitem__
168
+ * __len__
169
+ * __eq__
170
+ * dtype
171
+ * nbytes
172
+ * isna
173
+ * take
174
+ * copy
175
+ * _concat_same_type
176
+ * interpolate
177
+
178
+ A default repr displaying the type, (truncated) data, length,
179
+ and dtype is provided. It can be customized or replaced by
180
+ overriding:
181
+
182
+ * __repr__ : A default repr for the ExtensionArray.
183
+ * _formatter : Print scalars inside a Series or DataFrame.
184
+
185
+ Some methods require casting the ExtensionArray to an ndarray of Python
186
+ objects with ``self.astype(object)``, which may be expensive. When
187
+ performance is a concern, we highly recommend overriding the following
188
+ methods:
189
+
190
+ * fillna
191
+ * _pad_or_backfill
192
+ * dropna
193
+ * unique
194
+ * factorize / _values_for_factorize
195
+ * argsort, argmax, argmin / _values_for_argsort
196
+ * searchsorted
197
+ * map
198
+
199
+ The remaining methods implemented on this class should be performant,
200
+ as they only compose abstract methods. Still, a more efficient
201
+ implementation may be available, and these methods can be overridden.
202
+
203
+ One can implement methods to handle array accumulations or reductions.
204
+
205
+ * _accumulate
206
+ * _reduce
207
+
208
+ One can implement methods to handle parsing from strings that will be used
209
+ in methods such as ``pandas.io.parsers.read_csv``.
210
+
211
+ * _from_sequence_of_strings
212
+
213
+ This class does not inherit from 'abc.ABCMeta' for performance reasons.
214
+ Methods and properties required by the interface raise
215
+ ``pandas.errors.AbstractMethodError`` and no ``register`` method is
216
+ provided for registering virtual subclasses.
217
+
218
+ ExtensionArrays are limited to 1 dimension.
219
+
220
+ They may be backed by none, one, or many NumPy arrays. For example,
221
+ ``pandas.Categorical`` is an extension array backed by two arrays,
222
+ one for codes and one for categories. An array of IPv6 address may
223
+ be backed by a NumPy structured array with two fields, one for the
224
+ lower 64 bits and one for the upper 64 bits. Or they may be backed
225
+ by some other storage type, like Python lists. Pandas makes no
226
+ assumptions on how the data are stored, just that it can be converted
227
+ to a NumPy array.
228
+ The ExtensionArray interface does not impose any rules on how this data
229
+ is stored. However, currently, the backing data cannot be stored in
230
+ attributes called ``.values`` or ``._values`` to ensure full compatibility
231
+ with pandas internals. But other names as ``.data``, ``._data``,
232
+ ``._items``, ... can be freely used.
233
+
234
+ If implementing NumPy's ``__array_ufunc__`` interface, pandas expects
235
+ that
236
+
237
+ 1. You defer by returning ``NotImplemented`` when any Series are present
238
+ in `inputs`. Pandas will extract the arrays and call the ufunc again.
239
+ 2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class.
240
+ Pandas inspects this to determine whether the ufunc is valid for the
241
+ types present.
242
+
243
+ See :ref:`extending.extension.ufunc` for more.
244
+
245
+ By default, ExtensionArrays are not hashable. Immutable subclasses may
246
+ override this behavior.
247
+
248
+ Examples
249
+ --------
250
+ Please see the following:
251
+
252
+ https://github.com/pandas-dev/pandas/blob/main/pandas/tests/extension/list/array.py
253
+ """
254
+
255
+ # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
256
+ # Don't override this.
257
+ _typ = "extension"
258
+
259
+ # similar to __array_priority__, positions ExtensionArray after Index,
260
+ # Series, and DataFrame. EA subclasses may override to choose which EA
261
+ # subclass takes priority. If overriding, the value should always be
262
+ # strictly less than 2000 to be below Index.__pandas_priority__.
263
+ __pandas_priority__ = 1000
264
+
265
+ # ------------------------------------------------------------------------
266
+ # Constructors
267
+ # ------------------------------------------------------------------------
268
+
269
+ @classmethod
270
+ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
271
+ """
272
+ Construct a new ExtensionArray from a sequence of scalars.
273
+
274
+ Parameters
275
+ ----------
276
+ scalars : Sequence
277
+ Each element will be an instance of the scalar type for this
278
+ array, ``cls.dtype.type`` or be converted into this type in this method.
279
+ dtype : dtype, optional
280
+ Construct for this particular dtype. This should be a Dtype
281
+ compatible with the ExtensionArray.
282
+ copy : bool, default False
283
+ If True, copy the underlying data.
284
+
285
+ Returns
286
+ -------
287
+ ExtensionArray
288
+
289
+ Examples
290
+ --------
291
+ >>> pd.arrays.IntegerArray._from_sequence([4, 5])
292
+ <IntegerArray>
293
+ [4, 5]
294
+ Length: 2, dtype: Int64
295
+ """
296
+ raise AbstractMethodError(cls)
297
+
298
+ @classmethod
299
+ def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self:
300
+ """
301
+ Strict analogue to _from_sequence, allowing only sequences of scalars
302
+ that should be specifically inferred to the given dtype.
303
+
304
+ Parameters
305
+ ----------
306
+ scalars : sequence
307
+ dtype : ExtensionDtype
308
+
309
+ Raises
310
+ ------
311
+ TypeError or ValueError
312
+
313
+ Notes
314
+ -----
315
+ This is called in a try/except block when casting the result of a
316
+ pointwise operation.
317
+ """
318
+ try:
319
+ return cls._from_sequence(scalars, dtype=dtype, copy=False)
320
+ except (ValueError, TypeError):
321
+ raise
322
+ except Exception:
323
+ warnings.warn(
324
+ "_from_scalars should only raise ValueError or TypeError. "
325
+ "Consider overriding _from_scalars where appropriate.",
326
+ stacklevel=find_stack_level(),
327
+ )
328
+ raise
329
+
330
+ @classmethod
331
+ def _from_sequence_of_strings(
332
+ cls, strings, *, dtype: Dtype | None = None, copy: bool = False
333
+ ):
334
+ """
335
+ Construct a new ExtensionArray from a sequence of strings.
336
+
337
+ Parameters
338
+ ----------
339
+ strings : Sequence
340
+ Each element will be an instance of the scalar type for this
341
+ array, ``cls.dtype.type``.
342
+ dtype : dtype, optional
343
+ Construct for this particular dtype. This should be a Dtype
344
+ compatible with the ExtensionArray.
345
+ copy : bool, default False
346
+ If True, copy the underlying data.
347
+
348
+ Returns
349
+ -------
350
+ ExtensionArray
351
+
352
+ Examples
353
+ --------
354
+ >>> pd.arrays.IntegerArray._from_sequence_of_strings(["1", "2", "3"])
355
+ <IntegerArray>
356
+ [1, 2, 3]
357
+ Length: 3, dtype: Int64
358
+ """
359
+ raise AbstractMethodError(cls)
360
+
361
+ @classmethod
362
+ def _from_factorized(cls, values, original):
363
+ """
364
+ Reconstruct an ExtensionArray after factorization.
365
+
366
+ Parameters
367
+ ----------
368
+ values : ndarray
369
+ An integer ndarray with the factorized values.
370
+ original : ExtensionArray
371
+ The original ExtensionArray that factorize was called on.
372
+
373
+ See Also
374
+ --------
375
+ factorize : Top-level factorize method that dispatches here.
376
+ ExtensionArray.factorize : Encode the extension array as an enumerated type.
377
+
378
+ Examples
379
+ --------
380
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
381
+ ... pd.Interval(1, 5), pd.Interval(1, 5)])
382
+ >>> codes, uniques = pd.factorize(interv_arr)
383
+ >>> pd.arrays.IntervalArray._from_factorized(uniques, interv_arr)
384
+ <IntervalArray>
385
+ [(0, 1], (1, 5]]
386
+ Length: 2, dtype: interval[int64, right]
387
+ """
388
+ raise AbstractMethodError(cls)
389
+
390
+ # ------------------------------------------------------------------------
391
+ # Must be a Sequence
392
+ # ------------------------------------------------------------------------
393
+ @overload
394
+ def __getitem__(self, item: ScalarIndexer) -> Any:
395
+ ...
396
+
397
+ @overload
398
+ def __getitem__(self, item: SequenceIndexer) -> Self:
399
+ ...
400
+
401
+ def __getitem__(self, item: PositionalIndexer) -> Self | Any:
402
+ """
403
+ Select a subset of self.
404
+
405
+ Parameters
406
+ ----------
407
+ item : int, slice, or ndarray
408
+ * int: The position in 'self' to get.
409
+
410
+ * slice: A slice object, where 'start', 'stop', and 'step' are
411
+ integers or None
412
+
413
+ * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
414
+
415
+ * list[int]: A list of int
416
+
417
+ Returns
418
+ -------
419
+ item : scalar or ExtensionArray
420
+
421
+ Notes
422
+ -----
423
+ For scalar ``item``, return a scalar value suitable for the array's
424
+ type. This should be an instance of ``self.dtype.type``.
425
+
426
+ For slice ``key``, return an instance of ``ExtensionArray``, even
427
+ if the slice is length 0 or 1.
428
+
429
+ For a boolean mask, return an instance of ``ExtensionArray``, filtered
430
+ to the values where ``item`` is True.
431
+ """
432
+ raise AbstractMethodError(self)
433
+
434
+ def __setitem__(self, key, value) -> None:
435
+ """
436
+ Set one or more values inplace.
437
+
438
+ This method is not required to satisfy the pandas extension array
439
+ interface.
440
+
441
+ Parameters
442
+ ----------
443
+ key : int, ndarray, or slice
444
+ When called from, e.g. ``Series.__setitem__``, ``key`` will be
445
+ one of
446
+
447
+ * scalar int
448
+ * ndarray of integers.
449
+ * boolean ndarray
450
+ * slice object
451
+
452
+ value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
453
+ value or values to be set at ``key``.
454
+
455
+ Returns
456
+ -------
457
+ None
458
+ """
459
+ # Some notes to the ExtensionArray implementer who may have ended up
460
+ # here. While this method is not required for the interface, if you
461
+ # *do* choose to implement __setitem__, then some semantics should be
462
+ # observed:
463
+ #
464
+ # * Setting multiple values : ExtensionArrays should support setting
465
+ # multiple values at once, 'key' will be a sequence of integers and
466
+ # 'value' will be a same-length sequence.
467
+ #
468
+ # * Broadcasting : For a sequence 'key' and a scalar 'value',
469
+ # each position in 'key' should be set to 'value'.
470
+ #
471
+ # * Coercion : Most users will expect basic coercion to work. For
472
+ # example, a string like '2018-01-01' is coerced to a datetime
473
+ # when setting on a datetime64ns array. In general, if the
474
+ # __init__ method coerces that value, then so should __setitem__
475
+ # Note, also, that Series/DataFrame.where internally use __setitem__
476
+ # on a copy of the data.
477
+ raise NotImplementedError(f"{type(self)} does not implement __setitem__.")
478
+
479
+ def __len__(self) -> int:
480
+ """
481
+ Length of this array
482
+
483
+ Returns
484
+ -------
485
+ length : int
486
+ """
487
+ raise AbstractMethodError(self)
488
+
489
+ def __iter__(self) -> Iterator[Any]:
490
+ """
491
+ Iterate over elements of the array.
492
+ """
493
+ # This needs to be implemented so that pandas recognizes extension
494
+ # arrays as list-like. The default implementation makes successive
495
+ # calls to ``__getitem__``, which may be slower than necessary.
496
+ for i in range(len(self)):
497
+ yield self[i]
498
+
499
+ def __contains__(self, item: object) -> bool | np.bool_:
500
+ """
501
+ Return for `item in self`.
502
+ """
503
+ # GH37867
504
+ # comparisons of any item to pd.NA always return pd.NA, so e.g. "a" in [pd.NA]
505
+ # would raise a TypeError. The implementation below works around that.
506
+ if is_scalar(item) and isna(item):
507
+ if not self._can_hold_na:
508
+ return False
509
+ elif item is self.dtype.na_value or isinstance(item, self.dtype.type):
510
+ return self._hasna
511
+ else:
512
+ return False
513
+ else:
514
+ # error: Item "ExtensionArray" of "Union[ExtensionArray, ndarray]" has no
515
+ # attribute "any"
516
+ return (item == self).any() # type: ignore[union-attr]
517
+
518
+ # error: Signature of "__eq__" incompatible with supertype "object"
519
+ def __eq__(self, other: object) -> ArrayLike: # type: ignore[override]
520
+ """
521
+ Return for `self == other` (element-wise equality).
522
+ """
523
+ # Implementer note: this should return a boolean numpy ndarray or
524
+ # a boolean ExtensionArray.
525
+ # When `other` is one of Series, Index, or DataFrame, this method should
526
+ # return NotImplemented (to ensure that those objects are responsible for
527
+ # first unpacking the arrays, and then dispatch the operation to the
528
+ # underlying arrays)
529
+ raise AbstractMethodError(self)
530
+
531
+ # error: Signature of "__ne__" incompatible with supertype "object"
532
+ def __ne__(self, other: object) -> ArrayLike: # type: ignore[override]
533
+ """
534
+ Return for `self != other` (element-wise in-equality).
535
+ """
536
+ # error: Unsupported operand type for ~ ("ExtensionArray")
537
+ return ~(self == other) # type: ignore[operator]
538
+
539
+ def to_numpy(
540
+ self,
541
+ dtype: npt.DTypeLike | None = None,
542
+ copy: bool = False,
543
+ na_value: object = lib.no_default,
544
+ ) -> np.ndarray:
545
+ """
546
+ Convert to a NumPy ndarray.
547
+
548
+ This is similar to :meth:`numpy.asarray`, but may provide additional control
549
+ over how the conversion is done.
550
+
551
+ Parameters
552
+ ----------
553
+ dtype : str or numpy.dtype, optional
554
+ The dtype to pass to :meth:`numpy.asarray`.
555
+ copy : bool, default False
556
+ Whether to ensure that the returned value is not a view on
557
+ another array. Note that ``copy=False`` does not *ensure* that
558
+ ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
559
+ a copy is made, even if not strictly necessary.
560
+ na_value : Any, optional
561
+ The value to use for missing values. The default value depends
562
+ on `dtype` and the type of the array.
563
+
564
+ Returns
565
+ -------
566
+ numpy.ndarray
567
+ """
568
+ result = np.asarray(self, dtype=dtype)
569
+ if copy or na_value is not lib.no_default:
570
+ result = result.copy()
571
+ if na_value is not lib.no_default:
572
+ result[self.isna()] = na_value
573
+ return result
574
+
575
+ # ------------------------------------------------------------------------
576
+ # Required attributes
577
+ # ------------------------------------------------------------------------
578
+
579
+ @property
580
+ def dtype(self) -> ExtensionDtype:
581
+ """
582
+ An instance of ExtensionDtype.
583
+
584
+ Examples
585
+ --------
586
+ >>> pd.array([1, 2, 3]).dtype
587
+ Int64Dtype()
588
+ """
589
+ raise AbstractMethodError(self)
590
+
591
+ @property
592
+ def shape(self) -> Shape:
593
+ """
594
+ Return a tuple of the array dimensions.
595
+
596
+ Examples
597
+ --------
598
+ >>> arr = pd.array([1, 2, 3])
599
+ >>> arr.shape
600
+ (3,)
601
+ """
602
+ return (len(self),)
603
+
604
+ @property
605
+ def size(self) -> int:
606
+ """
607
+ The number of elements in the array.
608
+ """
609
+ # error: Incompatible return value type (got "signedinteger[_64Bit]",
610
+ # expected "int") [return-value]
611
+ return np.prod(self.shape) # type: ignore[return-value]
612
+
613
+ @property
614
+ def ndim(self) -> int:
615
+ """
616
+ Extension Arrays are only allowed to be 1-dimensional.
617
+
618
+ Examples
619
+ --------
620
+ >>> arr = pd.array([1, 2, 3])
621
+ >>> arr.ndim
622
+ 1
623
+ """
624
+ return 1
625
+
626
+ @property
627
+ def nbytes(self) -> int:
628
+ """
629
+ The number of bytes needed to store this object in memory.
630
+
631
+ Examples
632
+ --------
633
+ >>> pd.array([1, 2, 3]).nbytes
634
+ 27
635
+ """
636
+ # If this is expensive to compute, return an approximate lower bound
637
+ # on the number of bytes needed.
638
+ raise AbstractMethodError(self)
639
+
640
+ # ------------------------------------------------------------------------
641
+ # Additional Methods
642
+ # ------------------------------------------------------------------------
643
+
644
+ @overload
645
+ def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
646
+ ...
647
+
648
+ @overload
649
+ def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
650
+ ...
651
+
652
+ @overload
653
+ def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
654
+ ...
655
+
656
+ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
657
+ """
658
+ Cast to a NumPy array or ExtensionArray with 'dtype'.
659
+
660
+ Parameters
661
+ ----------
662
+ dtype : str or dtype
663
+ Typecode or data-type to which the array is cast.
664
+ copy : bool, default True
665
+ Whether to copy the data, even if not necessary. If False,
666
+ a copy is made only if the old dtype does not match the
667
+ new dtype.
668
+
669
+ Returns
670
+ -------
671
+ np.ndarray or pandas.api.extensions.ExtensionArray
672
+ An ``ExtensionArray`` if ``dtype`` is ``ExtensionDtype``,
673
+ otherwise a Numpy ndarray with ``dtype`` for its dtype.
674
+
675
+ Examples
676
+ --------
677
+ >>> arr = pd.array([1, 2, 3])
678
+ >>> arr
679
+ <IntegerArray>
680
+ [1, 2, 3]
681
+ Length: 3, dtype: Int64
682
+
683
+ Casting to another ``ExtensionDtype`` returns an ``ExtensionArray``:
684
+
685
+ >>> arr1 = arr.astype('Float64')
686
+ >>> arr1
687
+ <FloatingArray>
688
+ [1.0, 2.0, 3.0]
689
+ Length: 3, dtype: Float64
690
+ >>> arr1.dtype
691
+ Float64Dtype()
692
+
693
+ Otherwise, we will get a Numpy ndarray:
694
+
695
+ >>> arr2 = arr.astype('float64')
696
+ >>> arr2
697
+ array([1., 2., 3.])
698
+ >>> arr2.dtype
699
+ dtype('float64')
700
+ """
701
+ dtype = pandas_dtype(dtype)
702
+ if dtype == self.dtype:
703
+ if not copy:
704
+ return self
705
+ else:
706
+ return self.copy()
707
+
708
+ if isinstance(dtype, ExtensionDtype):
709
+ cls = dtype.construct_array_type()
710
+ return cls._from_sequence(self, dtype=dtype, copy=copy)
711
+
712
+ elif lib.is_np_dtype(dtype, "M"):
713
+ from pandas.core.arrays import DatetimeArray
714
+
715
+ return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy)
716
+
717
+ elif lib.is_np_dtype(dtype, "m"):
718
+ from pandas.core.arrays import TimedeltaArray
719
+
720
+ return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy)
721
+
722
+ if not copy:
723
+ return np.asarray(self, dtype=dtype)
724
+ else:
725
+ return np.array(self, dtype=dtype, copy=copy)
726
+
727
+ def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:
728
+ """
729
+ A 1-D array indicating if each value is missing.
730
+
731
+ Returns
732
+ -------
733
+ numpy.ndarray or pandas.api.extensions.ExtensionArray
734
+ In most cases, this should return a NumPy ndarray. For
735
+ exceptional cases like ``SparseArray``, where returning
736
+ an ndarray would be expensive, an ExtensionArray may be
737
+ returned.
738
+
739
+ Notes
740
+ -----
741
+ If returning an ExtensionArray, then
742
+
743
+ * ``na_values._is_boolean`` should be True
744
+ * ``na_values`` should implement :func:`ExtensionArray._reduce`
745
+ * ``na_values.any`` and ``na_values.all`` should be implemented
746
+
747
+ Examples
748
+ --------
749
+ >>> arr = pd.array([1, 2, np.nan, np.nan])
750
+ >>> arr.isna()
751
+ array([False, False, True, True])
752
+ """
753
+ raise AbstractMethodError(self)
754
+
755
+ @property
756
+ def _hasna(self) -> bool:
757
+ # GH#22680
758
+ """
759
+ Equivalent to `self.isna().any()`.
760
+
761
+ Some ExtensionArray subclasses may be able to optimize this check.
762
+ """
763
+ return bool(self.isna().any())
764
+
765
+ def _values_for_argsort(self) -> np.ndarray:
766
+ """
767
+ Return values for sorting.
768
+
769
+ Returns
770
+ -------
771
+ ndarray
772
+ The transformed values should maintain the ordering between values
773
+ within the array.
774
+
775
+ See Also
776
+ --------
777
+ ExtensionArray.argsort : Return the indices that would sort this array.
778
+
779
+ Notes
780
+ -----
781
+ The caller is responsible for *not* modifying these values in-place, so
782
+ it is safe for implementers to give views on ``self``.
783
+
784
+ Functions that use this (e.g. ``ExtensionArray.argsort``) should ignore
785
+ entries with missing values in the original array (according to
786
+ ``self.isna()``). This means that the corresponding entries in the returned
787
+ array don't need to be modified to sort correctly.
788
+
789
+ Examples
790
+ --------
791
+ In most cases, this is the underlying Numpy array of the ``ExtensionArray``:
792
+
793
+ >>> arr = pd.array([1, 2, 3])
794
+ >>> arr._values_for_argsort()
795
+ array([1, 2, 3])
796
+ """
797
+ # Note: this is used in `ExtensionArray.argsort/argmin/argmax`.
798
+ return np.array(self)
799
+
800
+ def argsort(
801
+ self,
802
+ *,
803
+ ascending: bool = True,
804
+ kind: SortKind = "quicksort",
805
+ na_position: str = "last",
806
+ **kwargs,
807
+ ) -> np.ndarray:
808
+ """
809
+ Return the indices that would sort this array.
810
+
811
+ Parameters
812
+ ----------
813
+ ascending : bool, default True
814
+ Whether the indices should result in an ascending
815
+ or descending sort.
816
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
817
+ Sorting algorithm.
818
+ na_position : {'first', 'last'}, default 'last'
819
+ If ``'first'``, put ``NaN`` values at the beginning.
820
+ If ``'last'``, put ``NaN`` values at the end.
821
+ **kwargs:
822
+ Passed through to :func:`numpy.argsort`.
823
+
824
+ Returns
825
+ -------
826
+ np.ndarray[np.intp]
827
+ Array of indices that sort ``self``. If NaN values are contained,
828
+ NaN values are placed at the end.
829
+
830
+ See Also
831
+ --------
832
+ numpy.argsort : Sorting implementation used internally.
833
+
834
+ Examples
835
+ --------
836
+ >>> arr = pd.array([3, 1, 2, 5, 4])
837
+ >>> arr.argsort()
838
+ array([1, 2, 0, 4, 3])
839
+ """
840
+ # Implementer note: You have two places to override the behavior of
841
+ # argsort.
842
+ # 1. _values_for_argsort : construct the values passed to np.argsort
843
+ # 2. argsort : total control over sorting. In case of overriding this,
844
+ # it is recommended to also override argmax/argmin
845
+ ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)
846
+
847
+ values = self._values_for_argsort()
848
+ return nargsort(
849
+ values,
850
+ kind=kind,
851
+ ascending=ascending,
852
+ na_position=na_position,
853
+ mask=np.asarray(self.isna()),
854
+ )
855
+
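# --- Editor's sketch, not part of the pandas source file: the implementer note
# --- above names two override points for sorting.  Overriding only
# --- _values_for_argsort is usually enough; here a hypothetical subclass of
# --- pd.arrays.NumpyExtensionArray (used elsewhere in these docstrings) sorts
# --- on negated values, so argsort/argmin/argmax follow the reversed order.
def _argsort_override_sketch():
    import numpy as np
    import pandas as pd

    class ReversedSortArray(pd.arrays.NumpyExtensionArray):  # hypothetical subclass
        def _values_for_argsort(self) -> np.ndarray:
            # the base argsort/argmin/argmax only need an ndarray whose
            # ordering encodes the desired ordering of self
            return -np.asarray(self)

    arr = ReversedSortArray(np.array([3, 1, 2]))
    return arr.argsort()  # indices of a descending sort: array([0, 2, 1])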
856
+ def argmin(self, skipna: bool = True) -> int:
857
+ """
858
+ Return the index of minimum value.
859
+
860
+ In case of multiple occurrences of the minimum value, the index
861
+ corresponding to the first occurrence is returned.
862
+
863
+ Parameters
864
+ ----------
865
+ skipna : bool, default True
866
+
867
+ Returns
868
+ -------
869
+ int
870
+
871
+ See Also
872
+ --------
873
+ ExtensionArray.argmax : Return the index of the maximum value.
874
+
875
+ Examples
876
+ --------
877
+ >>> arr = pd.array([3, 1, 2, 5, 4])
878
+ >>> arr.argmin()
879
+ 1
880
+ """
881
+ # Implementer note: You have two places to override the behavior of
882
+ # argmin.
883
+ # 1. _values_for_argsort : construct the values used in nargminmax
884
+ # 2. argmin itself : total control over sorting.
885
+ validate_bool_kwarg(skipna, "skipna")
886
+ if not skipna and self._hasna:
887
+ raise NotImplementedError
888
+ return nargminmax(self, "argmin")
889
+
890
+ def argmax(self, skipna: bool = True) -> int:
891
+ """
892
+ Return the index of maximum value.
893
+
894
+ In case of multiple occurrences of the maximum value, the index
895
+ corresponding to the first occurrence is returned.
896
+
897
+ Parameters
898
+ ----------
899
+ skipna : bool, default True
900
+
901
+ Returns
902
+ -------
903
+ int
904
+
905
+ See Also
906
+ --------
907
+ ExtensionArray.argmin : Return the index of the minimum value.
908
+
909
+ Examples
910
+ --------
911
+ >>> arr = pd.array([3, 1, 2, 5, 4])
912
+ >>> arr.argmax()
913
+ 3
914
+ """
915
+ # Implementer note: You have two places to override the behavior of
916
+ # argmax.
917
+ # 1. _values_for_argsort : construct the values used in nargminmax
918
+ # 2. argmax itself : total control over sorting.
919
+ validate_bool_kwarg(skipna, "skipna")
920
+ if not skipna and self._hasna:
921
+ raise NotImplementedError
922
+ return nargminmax(self, "argmax")
923
+
924
+ def interpolate(
925
+ self,
926
+ *,
927
+ method: InterpolateOptions,
928
+ axis: int,
929
+ index: Index,
930
+ limit,
931
+ limit_direction,
932
+ limit_area,
933
+ copy: bool,
934
+ **kwargs,
935
+ ) -> Self:
936
+ """
937
+ See DataFrame.interpolate.__doc__.
938
+
939
+ Examples
940
+ --------
941
+ >>> arr = pd.arrays.NumpyExtensionArray(np.array([0, 1, np.nan, 3]))
942
+ >>> arr.interpolate(method="linear",
943
+ ... limit=3,
944
+ ... limit_direction="forward",
945
+ ... index=pd.Index([1, 2, 3, 4]),
946
+ ... fill_value=1,
947
+ ... copy=False,
948
+ ... axis=0,
949
+ ... limit_area="inside"
950
+ ... )
951
+ <NumpyExtensionArray>
952
+ [0.0, 1.0, 2.0, 3.0]
953
+ Length: 4, dtype: float64
954
+ """
955
+ # NB: we return type(self) even if copy=False
956
+ raise NotImplementedError(
957
+ f"{type(self).__name__} does not implement interpolate"
958
+ )
959
+
960
+ def _pad_or_backfill(
961
+ self,
962
+ *,
963
+ method: FillnaOptions,
964
+ limit: int | None = None,
965
+ limit_area: Literal["inside", "outside"] | None = None,
966
+ copy: bool = True,
967
+ ) -> Self:
968
+ """
969
+ Pad or backfill values, used by Series/DataFrame ffill and bfill.
970
+
971
+ Parameters
972
+ ----------
973
+ method : {'backfill', 'bfill', 'pad', 'ffill'}
974
+ Method to use for filling holes in reindexed Series:
975
+
976
+ * pad / ffill: propagate last valid observation forward to next valid.
977
+ * backfill / bfill: use NEXT valid observation to fill gap.
978
+
979
+ limit : int, default None
980
+ This is the maximum number of consecutive
981
+ NaN values to forward/backward fill. In other words, if there is
982
+ a gap with more than this number of consecutive NaNs, it will only
983
+ be partially filled. If method is not specified, this is the
984
+ maximum number of entries along the entire axis where NaNs will be
985
+ filled.
986
+
987
+ copy : bool, default True
988
+ Whether to make a copy of the data before filling. If False, then
989
+ the original should be modified and no new memory should be allocated.
990
+ For ExtensionArray subclasses that cannot do this, it is at the
991
+ author's discretion whether to ignore "copy=False" or to raise.
992
+ The base class implementation ignores the keyword if any NAs are
993
+ present.
994
+
995
+ Returns
996
+ -------
997
+ Same type as self
998
+
999
+ Examples
1000
+ --------
1001
+ >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
1002
+ >>> arr._pad_or_backfill(method="backfill", limit=1)
1003
+ <IntegerArray>
1004
+ [<NA>, 2, 2, 3, <NA>, <NA>]
1005
+ Length: 6, dtype: Int64
1006
+ """
1007
+
1008
+ # If a 3rd-party EA has implemented this functionality in fillna,
1009
+ # we warn that they need to implement _pad_or_backfill instead.
1010
+ if (
1011
+ type(self).fillna is not ExtensionArray.fillna
1012
+ and type(self)._pad_or_backfill is ExtensionArray._pad_or_backfill
1013
+ ):
1014
+ # Check for _pad_or_backfill here allows us to call
1015
+ # super()._pad_or_backfill without getting this warning
1016
+ warnings.warn(
1017
+ "ExtensionArray.fillna 'method' keyword is deprecated. "
1018
+ "In a future version. arr._pad_or_backfill will be called "
1019
+ "instead. 3rd-party ExtensionArray authors need to implement "
1020
+ "_pad_or_backfill.",
1021
+ DeprecationWarning,
1022
+ stacklevel=find_stack_level(),
1023
+ )
1024
+ if limit_area is not None:
1025
+ raise NotImplementedError(
1026
+ f"{type(self).__name__} does not implement limit_area "
1027
+ "(added in pandas 2.2). 3rd-party ExtnsionArray authors "
1028
+ "need to add this argument to _pad_or_backfill."
1029
+ )
1030
+ return self.fillna(method=method, limit=limit)
1031
+
1032
+ mask = self.isna()
1033
+
1034
+ if mask.any():
1035
+ # NB: the base class does not respect the "copy" keyword
1036
+ meth = missing.clean_fill_method(method)
1037
+
1038
+ npmask = np.asarray(mask)
1039
+ if limit_area is not None and not npmask.all():
1040
+ _fill_limit_area_1d(npmask, limit_area)
1041
+ if meth == "pad":
1042
+ indexer = libalgos.get_fill_indexer(npmask, limit=limit)
1043
+ return self.take(indexer, allow_fill=True)
1044
+ else:
1045
+ # i.e. meth == "backfill"
1046
+ indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1]
1047
+ return self[::-1].take(indexer, allow_fill=True)
1048
+
1049
+ else:
1050
+ if not copy:
1051
+ return self
1052
+ new_values = self.copy()
1053
+ return new_values
1054
+
1055
+ def fillna(
1056
+ self,
1057
+ value: object | ArrayLike | None = None,
1058
+ method: FillnaOptions | None = None,
1059
+ limit: int | None = None,
1060
+ copy: bool = True,
1061
+ ) -> Self:
1062
+ """
1063
+ Fill NA/NaN values using the specified method.
1064
+
1065
+ Parameters
1066
+ ----------
1067
+ value : scalar, array-like
1068
+ If a scalar value is passed it is used to fill all missing values.
1069
+ Alternatively, an array-like "value" can be given. It's expected
1070
+ that the array-like have the same length as 'self'.
1071
+ method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
1072
+ Method to use for filling holes in reindexed Series:
1073
+
1074
+ * pad / ffill: propagate last valid observation forward to next valid.
1075
+ * backfill / bfill: use NEXT valid observation to fill gap.
1076
+
1077
+ .. deprecated:: 2.1.0
1078
+
1079
+ limit : int, default None
1080
+ If method is specified, this is the maximum number of consecutive
1081
+ NaN values to forward/backward fill. In other words, if there is
1082
+ a gap with more than this number of consecutive NaNs, it will only
1083
+ be partially filled. If method is not specified, this is the
1084
+ maximum number of entries along the entire axis where NaNs will be
1085
+ filled.
1086
+
1087
+ .. deprecated:: 2.1.0
1088
+
1089
+ copy : bool, default True
1090
+ Whether to make a copy of the data before filling. If False, then
1091
+ the original should be modified and no new memory should be allocated.
1092
+ For ExtensionArray subclasses that cannot do this, it is at the
1093
+ author's discretion whether to ignore "copy=False" or to raise.
1094
+ The base class implementation ignores the keyword in pad/backfill
1095
+ cases.
1096
+
1097
+ Returns
1098
+ -------
1099
+ ExtensionArray
1100
+ With NA/NaN filled.
1101
+
1102
+ Examples
1103
+ --------
1104
+ >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
1105
+ >>> arr.fillna(0)
1106
+ <IntegerArray>
1107
+ [0, 0, 2, 3, 0, 0]
1108
+ Length: 6, dtype: Int64
1109
+ """
1110
+ if method is not None:
1111
+ warnings.warn(
1112
+ f"The 'method' keyword in {type(self).__name__}.fillna is "
1113
+ "deprecated and will be removed in a future version.",
1114
+ FutureWarning,
1115
+ stacklevel=find_stack_level(),
1116
+ )
1117
+
1118
+ value, method = validate_fillna_kwargs(value, method)
1119
+
1120
+ mask = self.isna()
1121
+ # error: Argument 2 to "check_value_size" has incompatible type
1122
+ # "ExtensionArray"; expected "ndarray"
1123
+ value = missing.check_value_size(
1124
+ value, mask, len(self) # type: ignore[arg-type]
1125
+ )
1126
+
1127
+ if mask.any():
1128
+ if method is not None:
1129
+ meth = missing.clean_fill_method(method)
1130
+
1131
+ npmask = np.asarray(mask)
1132
+ if meth == "pad":
1133
+ indexer = libalgos.get_fill_indexer(npmask, limit=limit)
1134
+ return self.take(indexer, allow_fill=True)
1135
+ else:
1136
+ # i.e. meth == "backfill"
1137
+ indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1]
1138
+ return self[::-1].take(indexer, allow_fill=True)
1139
+ else:
1140
+ # fill with value
1141
+ if not copy:
1142
+ new_values = self[:]
1143
+ else:
1144
+ new_values = self.copy()
1145
+ new_values[mask] = value
1146
+ else:
1147
+ if not copy:
1148
+ new_values = self[:]
1149
+ else:
1150
+ new_values = self.copy()
1151
+ return new_values
1152
+
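# --- Editor's sketch, not part of the pandas source file: fillna() with a
# --- scalar versus an aligned array-like value, mirroring the docstring above
# --- (the array-like is expected to have the same length as self).  The helper
# --- name is hypothetical and for illustration only.
def _fillna_usage_sketch():
    import pandas as pd

    arr = pd.array([pd.NA, 2, pd.NA], dtype="Int64")
    filled_scalar = arr.fillna(0)                                    # -> [0, 2, 0]
    filled_array = arr.fillna(pd.array([7, 8, 9], dtype="Int64"))    # -> [7, 2, 9]
    return filled_scalar, filled_array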
1153
+ def dropna(self) -> Self:
1154
+ """
1155
+ Return ExtensionArray without NA values.
1156
+
1157
+ Returns
1158
+ -------
1159
+ Self
+ An ExtensionArray of the same type, with the NA values removed.
+
1160
+ Examples
1161
+ --------
1162
+ >>> pd.array([1, 2, np.nan]).dropna()
1163
+ <IntegerArray>
1164
+ [1, 2]
1165
+ Length: 2, dtype: Int64
1166
+ """
1167
+ # error: Unsupported operand type for ~ ("ExtensionArray")
1168
+ return self[~self.isna()] # type: ignore[operator]
1169
+
1170
+ def duplicated(
1171
+ self, keep: Literal["first", "last", False] = "first"
1172
+ ) -> npt.NDArray[np.bool_]:
1173
+ """
1174
+ Return boolean ndarray denoting duplicate values.
1175
+
1176
+ Parameters
1177
+ ----------
1178
+ keep : {'first', 'last', False}, default 'first'
1179
+ - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
1180
+ - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
1181
+ - False : Mark all duplicates as ``True``.
1182
+
1183
+ Returns
1184
+ -------
1185
+ ndarray[bool]
1186
+
1187
+ Examples
1188
+ --------
1189
+ >>> pd.array([1, 1, 2, 3, 3], dtype="Int64").duplicated()
1190
+ array([False, True, False, False, True])
1191
+ """
1192
+ mask = self.isna().astype(np.bool_, copy=False)
1193
+ return duplicated(values=self, keep=keep, mask=mask)
1194
+
1195
+ def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray:
1196
+ """
1197
+ Shift values by desired number.
1198
+
1199
+ Newly introduced missing values are filled with
1200
+ ``self.dtype.na_value``.
1201
+
1202
+ Parameters
1203
+ ----------
1204
+ periods : int, default 1
1205
+ The number of periods to shift. Negative values are allowed
1206
+ for shifting backwards.
1207
+
1208
+ fill_value : object, optional
1209
+ The scalar value to use for newly introduced missing values.
1210
+ The default is ``self.dtype.na_value``.
1211
+
1212
+ Returns
1213
+ -------
1214
+ ExtensionArray
1215
+ Shifted.
1216
+
1217
+ Notes
1218
+ -----
1219
+ If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
1220
+ returned.
1221
+
1222
+ If ``periods > len(self)``, then an array of size
1223
+ len(self) is returned, with all values filled with
1224
+ ``self.dtype.na_value``.
1225
+
1226
+ For 2-dimensional ExtensionArrays, we are always shifting along axis=0.
1227
+
1228
+ Examples
1229
+ --------
1230
+ >>> arr = pd.array([1, 2, 3])
1231
+ >>> arr.shift(2)
1232
+ <IntegerArray>
1233
+ [<NA>, <NA>, 1]
1234
+ Length: 3, dtype: Int64
1235
+ """
1236
+ # Note: this implementation assumes that `self.dtype.na_value` can be
1237
+ # stored in an instance of your ExtensionArray with `self.dtype`.
1238
+ if not len(self) or periods == 0:
1239
+ return self.copy()
1240
+
1241
+ if isna(fill_value):
1242
+ fill_value = self.dtype.na_value
1243
+
1244
+ empty = self._from_sequence(
1245
+ [fill_value] * min(abs(periods), len(self)), dtype=self.dtype
1246
+ )
1247
+ if periods > 0:
1248
+ a = empty
1249
+ b = self[:-periods]
1250
+ else:
1251
+ a = self[abs(periods) :]
1252
+ b = empty
1253
+ return self._concat_same_type([a, b])
1254
+
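# --- Editor's sketch, not part of the pandas source file: the shift() semantics
# --- from the Notes above -- vacated slots are filled with self.dtype.na_value,
# --- and shifting by more than len(self) yields an all-NA array of equal length.
# --- The helper name is hypothetical and for illustration only.
def _shift_usage_sketch():
    import pandas as pd

    arr = pd.array([1, 2, 3])
    forward = arr.shift(1)    # -> [<NA>, 1, 2]
    backward = arr.shift(-1)  # -> [2, 3, <NA>]
    all_na = arr.shift(10)    # -> [<NA>, <NA>, <NA>]
    return forward, backward, all_na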
1255
+ def unique(self) -> Self:
1256
+ """
1257
+ Compute the ExtensionArray of unique values.
1258
+
1259
+ Returns
1260
+ -------
1261
+ pandas.api.extensions.ExtensionArray
1262
+
1263
+ Examples
1264
+ --------
1265
+ >>> arr = pd.array([1, 2, 3, 1, 2, 3])
1266
+ >>> arr.unique()
1267
+ <IntegerArray>
1268
+ [1, 2, 3]
1269
+ Length: 3, dtype: Int64
1270
+ """
1271
+ uniques = unique(self.astype(object))
1272
+ return self._from_sequence(uniques, dtype=self.dtype)
1273
+
1274
+ def searchsorted(
1275
+ self,
1276
+ value: NumpyValueArrayLike | ExtensionArray,
1277
+ side: Literal["left", "right"] = "left",
1278
+ sorter: NumpySorter | None = None,
1279
+ ) -> npt.NDArray[np.intp] | np.intp:
1280
+ """
1281
+ Find indices where elements should be inserted to maintain order.
1282
+
1283
+ Find the indices into a sorted array `self` such that, if the
1284
+ corresponding elements in `value` were inserted before the indices,
1285
+ the order of `self` would be preserved.
1286
+
1287
+ Assuming that `self` is sorted:
1288
+
1289
+ ====== ================================
1290
+ `side` returned index `i` satisfies
1291
+ ====== ================================
1292
+ left ``self[i-1] < value <= self[i]``
1293
+ right ``self[i-1] <= value < self[i]``
1294
+ ====== ================================
1295
+
1296
+ Parameters
1297
+ ----------
1298
+ value : array-like, list or scalar
1299
+ Value(s) to insert into `self`.
1300
+ side : {'left', 'right'}, optional
1301
+ If 'left', the index of the first suitable location found is given.
1302
+ If 'right', return the last such index. If there is no suitable
1303
+ index, return either 0 or N (where N is the length of `self`).
1304
+ sorter : 1-D array-like, optional
1305
+ Optional array of integer indices that sort `self` into ascending
1306
+ order. They are typically the result of argsort.
1307
+
1308
+ Returns
1309
+ -------
1310
+ array of ints or int
1311
+ If value is array-like, array of insertion points.
1312
+ If value is scalar, a single integer.
1313
+
1314
+ See Also
1315
+ --------
1316
+ numpy.searchsorted : Similar method from NumPy.
1317
+
1318
+ Examples
1319
+ --------
1320
+ >>> arr = pd.array([1, 2, 3, 5])
1321
+ >>> arr.searchsorted([4])
1322
+ array([3])
1323
+ """
1324
+ # Note: the base tests provided by pandas only test the basics.
1325
+ # We do not test
1326
+ # 1. Values outside the range of the `data_for_sorting` fixture
1327
+ # 2. Values between the values in the `data_for_sorting` fixture
1328
+ # 3. Missing values.
1329
+ arr = self.astype(object)
1330
+ if isinstance(value, ExtensionArray):
1331
+ value = value.astype(object)
1332
+ return arr.searchsorted(value, side=side, sorter=sorter)
1333
+
1334
+ def equals(self, other: object) -> bool:
1335
+ """
1336
+ Return whether another array is equivalent to this array.
1337
+
1338
+ Equivalent means that both arrays have the same shape and dtype, and
1339
+ all values compare equal. Missing values in the same location are
1340
+ considered equal (in contrast with normal equality).
1341
+
1342
+ Parameters
1343
+ ----------
1344
+ other : ExtensionArray
1345
+ Array to compare to this Array.
1346
+
1347
+ Returns
1348
+ -------
1349
+ boolean
1350
+ Whether the arrays are equivalent.
1351
+
1352
+ Examples
1353
+ --------
1354
+ >>> arr1 = pd.array([1, 2, np.nan])
1355
+ >>> arr2 = pd.array([1, 2, np.nan])
1356
+ >>> arr1.equals(arr2)
1357
+ True
1358
+ """
1359
+ if type(self) != type(other):
1360
+ return False
1361
+ other = cast(ExtensionArray, other)
1362
+ if self.dtype != other.dtype:
1363
+ return False
1364
+ elif len(self) != len(other):
1365
+ return False
1366
+ else:
1367
+ equal_values = self == other
1368
+ if isinstance(equal_values, ExtensionArray):
1369
+ # boolean array with NA -> fill with False
1370
+ equal_values = equal_values.fillna(False)
1371
+ # error: Unsupported left operand type for & ("ExtensionArray")
1372
+ equal_na = self.isna() & other.isna() # type: ignore[operator]
1373
+ return bool((equal_values | equal_na).all())
1374
+
1375
+ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
1376
+ """
1377
+ Pointwise comparison for set containment in the given values.
1378
+
1379
+ Roughly equivalent to `np.array([x in values for x in self])`
1380
+
1381
+ Parameters
1382
+ ----------
1383
+ values : np.ndarray or ExtensionArray
1384
+
1385
+ Returns
1386
+ -------
1387
+ np.ndarray[bool]
1388
+
1389
+ Examples
1390
+ --------
1391
+ >>> arr = pd.array([1, 2, 3])
1392
+ >>> arr.isin([1])
1393
+ <BooleanArray>
1394
+ [True, False, False]
1395
+ Length: 3, dtype: boolean
1396
+ """
1397
+ return isin(np.asarray(self), values)
1398
+
1399
+ def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
1400
+ """
1401
+ Return an array and missing value suitable for factorization.
1402
+
1403
+ Returns
1404
+ -------
1405
+ values : ndarray
1406
+ An array suitable for factorization. This should maintain order
1407
+ and be a supported dtype (Float64, Int64, UInt64, String, Object).
1408
+ By default, the extension array is cast to object dtype.
1409
+ na_value : object
1410
+ The value in `values` to consider missing. This will be treated
1411
+ as NA in the factorization routines, so it will be coded as
1412
+ `-1` and not included in `uniques`. By default,
1413
+ ``np.nan`` is used.
1414
+
1415
+ Notes
1416
+ -----
1417
+ The values returned by this method are also used in
1418
+ :func:`pandas.util.hash_pandas_object`. If needed, this can be
1419
+ overridden in the ``self._hash_pandas_object()`` method.
1420
+
1421
+ Examples
1422
+ --------
1423
+ >>> pd.array([1, 2, 3])._values_for_factorize()
1424
+ (array([1, 2, 3], dtype=object), nan)
1425
+ """
1426
+ return self.astype(object), np.nan
1427
+
1428
+ def factorize(
1429
+ self,
1430
+ use_na_sentinel: bool = True,
1431
+ ) -> tuple[np.ndarray, ExtensionArray]:
1432
+ """
1433
+ Encode the extension array as an enumerated type.
1434
+
1435
+ Parameters
1436
+ ----------
1437
+ use_na_sentinel : bool, default True
1438
+ If True, the sentinel -1 will be used for NaN values. If False,
1439
+ NaN values will be encoded as non-negative integers and will not drop the
1440
+ NaN from the uniques of the values.
1441
+
1442
+ .. versionadded:: 1.5.0
1443
+
1444
+ Returns
1445
+ -------
1446
+ codes : ndarray
1447
+ An integer NumPy array that's an indexer into the original
1448
+ ExtensionArray.
1449
+ uniques : ExtensionArray
1450
+ An ExtensionArray containing the unique values of `self`.
1451
+
1452
+ .. note::
1453
+
1454
+ uniques will *not* contain an entry for the NA value of
1455
+ the ExtensionArray if there are any missing values present
1456
+ in `self`.
1457
+
1458
+ See Also
1459
+ --------
1460
+ factorize : Top-level factorize method that dispatches here.
1461
+
1462
+ Notes
1463
+ -----
1464
+ :meth:`pandas.factorize` offers a `sort` keyword as well.
1465
+
1466
+ Examples
1467
+ --------
1468
+ >>> idx1 = pd.PeriodIndex(["2014-01", "2014-01", "2014-02", "2014-02",
1469
+ ... "2014-03", "2014-03"], freq="M")
1470
+ >>> arr, idx = idx1.factorize()
1471
+ >>> arr
1472
+ array([0, 0, 1, 1, 2, 2])
1473
+ >>> idx
1474
+ PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]')
1475
+ """
1476
+ # Implementer note: There are two ways to override the behavior of
1477
+ # pandas.factorize
1478
+ # 1. _values_for_factorize and _from_factorize.
1479
+ # Specify the values passed to pandas' internal factorization
1480
+ # routines, and how to convert from those values back to the
1481
+ # original ExtensionArray.
1482
+ # 2. ExtensionArray.factorize.
1483
+ # Complete control over factorization.
1484
+ arr, na_value = self._values_for_factorize()
1485
+
1486
+ codes, uniques = factorize_array(
1487
+ arr, use_na_sentinel=use_na_sentinel, na_value=na_value
1488
+ )
1489
+
1490
+ uniques_ea = self._from_factorized(uniques, self)
1491
+ return codes, uniques_ea
1492
+
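# --- Editor's sketch, not part of the pandas source file: the first override
# --- route named in the implementer note above (_values_for_factorize together
# --- with _from_factorized), on a hypothetical subclass of
# --- pd.arrays.NumpyExtensionArray.  Names and helper are illustrative only.
def _factorize_override_sketch():
    import numpy as np
    import pandas as pd

    class MyArray(pd.arrays.NumpyExtensionArray):  # hypothetical subclass
        def _values_for_factorize(self):
            # ndarray handed to pandas' factorization routines, plus the NA marker
            return np.asarray(self, dtype=object), np.nan

        @classmethod
        def _from_factorized(cls, uniques, original):
            # rebuild an array of the original dtype from the factorized uniques
            return cls._from_sequence(uniques, dtype=original.dtype)

    codes, uniques = MyArray(np.array([1, 1, 2])).factorize()
    return codes, uniques  # codes: array([0, 0, 1]); uniques: MyArray([1, 2])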
1493
+ _extension_array_shared_docs[
1494
+ "repeat"
1495
+ ] = """
1496
+ Repeat elements of a %(klass)s.
1497
+
1498
+ Returns a new %(klass)s where each element of the current %(klass)s
1499
+ is repeated consecutively a given number of times.
1500
+
1501
+ Parameters
1502
+ ----------
1503
+ repeats : int or array of ints
1504
+ The number of repetitions for each element. This should be a
1505
+ non-negative integer. Repeating 0 times will return an empty
1506
+ %(klass)s.
1507
+ axis : None
1508
+ Must be ``None``. Has no effect but is accepted for compatibility
1509
+ with numpy.
1510
+
1511
+ Returns
1512
+ -------
1513
+ %(klass)s
1514
+ Newly created %(klass)s with repeated elements.
1515
+
1516
+ See Also
1517
+ --------
1518
+ Series.repeat : Equivalent function for Series.
1519
+ Index.repeat : Equivalent function for Index.
1520
+ numpy.repeat : Similar method for :class:`numpy.ndarray`.
1521
+ ExtensionArray.take : Take arbitrary positions.
1522
+
1523
+ Examples
1524
+ --------
1525
+ >>> cat = pd.Categorical(['a', 'b', 'c'])
1526
+ >>> cat
1527
+ ['a', 'b', 'c']
1528
+ Categories (3, object): ['a', 'b', 'c']
1529
+ >>> cat.repeat(2)
1530
+ ['a', 'a', 'b', 'b', 'c', 'c']
1531
+ Categories (3, object): ['a', 'b', 'c']
1532
+ >>> cat.repeat([1, 2, 3])
1533
+ ['a', 'b', 'b', 'c', 'c', 'c']
1534
+ Categories (3, object): ['a', 'b', 'c']
1535
+ """
1536
+
1537
+ @Substitution(klass="ExtensionArray")
1538
+ @Appender(_extension_array_shared_docs["repeat"])
1539
+ def repeat(self, repeats: int | Sequence[int], axis: AxisInt | None = None) -> Self:
1540
+ nv.validate_repeat((), {"axis": axis})
1541
+ ind = np.arange(len(self)).repeat(repeats)
1542
+ return self.take(ind)
1543
+
1544
+ # ------------------------------------------------------------------------
1545
+ # Indexing methods
1546
+ # ------------------------------------------------------------------------
1547
+
1548
+ def take(
1549
+ self,
1550
+ indices: TakeIndexer,
1551
+ *,
1552
+ allow_fill: bool = False,
1553
+ fill_value: Any = None,
1554
+ ) -> Self:
1555
+ """
1556
+ Take elements from an array.
1557
+
1558
+ Parameters
1559
+ ----------
1560
+ indices : sequence of int or one-dimensional np.ndarray of int
1561
+ Indices to be taken.
1562
+ allow_fill : bool, default False
1563
+ How to handle negative values in `indices`.
1564
+
1565
+ * False: negative values in `indices` indicate positional indices
1566
+ from the right (the default). This is similar to
1567
+ :func:`numpy.take`.
1568
+
1569
+ * True: negative values in `indices` indicate
1570
+ missing values. These values are set to `fill_value`. Any
1571
+ other negative values raise a ``ValueError``.
1572
+
1573
+ fill_value : any, optional
1574
+ Fill value to use for NA-indices when `allow_fill` is True.
1575
+ This may be ``None``, in which case the default NA value for
1576
+ the type, ``self.dtype.na_value``, is used.
1577
+
1578
+ For many ExtensionArrays, there will be two representations of
1579
+ `fill_value`: a user-facing "boxed" scalar, and a low-level
1580
+ physical NA value. `fill_value` should be the user-facing version,
1581
+ and the implementation should handle translating that to the
1582
+ physical version for processing the take if necessary.
1583
+
1584
+ Returns
1585
+ -------
1586
+ ExtensionArray
1587
+
1588
+ Raises
1589
+ ------
1590
+ IndexError
1591
+ When the indices are out of bounds for the array.
1592
+ ValueError
1593
+ When `indices` contains negative values other than ``-1``
1594
+ and `allow_fill` is True.
1595
+
1596
+ See Also
1597
+ --------
1598
+ numpy.take : Take elements from an array along an axis.
1599
+ api.extensions.take : Take elements from an array.
1600
+
1601
+ Notes
1602
+ -----
1603
+ ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
1604
+ ``iloc``, when `indices` is a sequence of values. Additionally,
1605
+ it's called by :meth:`Series.reindex`, or any other method
1606
+ that causes realignment, with a `fill_value`.
1607
+
1608
+ Examples
1609
+ --------
1610
+ Here's an example implementation, which relies on casting the
1611
+ extension array to object dtype. This uses the helper method
1612
+ :func:`pandas.api.extensions.take`.
1613
+
1614
+ .. code-block:: python
1615
+
1616
+ def take(self, indices, allow_fill=False, fill_value=None):
1617
+ from pandas.core.algorithms import take
1618
+
1619
+ # If the ExtensionArray is backed by an ndarray, then
1620
+ # just pass that here instead of coercing to object.
1621
+ data = self.astype(object)
1622
+
1623
+ if allow_fill and fill_value is None:
1624
+ fill_value = self.dtype.na_value
1625
+
1626
+ # fill value should always be translated from the scalar
1627
+ # type for the array, to the physical storage type for
1628
+ # the data, before passing to take.
1629
+
1630
+ result = take(data, indices, fill_value=fill_value,
1631
+ allow_fill=allow_fill)
1632
+ return self._from_sequence(result, dtype=self.dtype)
1633
+ """
1634
+ # Implementer note: The `fill_value` parameter should be a user-facing
1635
+ # value, an instance of self.dtype.type. When passed `fill_value=None`,
1636
+ # the default of `self.dtype.na_value` should be used.
1637
+ # This may differ from the physical storage type your ExtensionArray
1638
+ # uses. In this case, your implementation is responsible for casting
1639
+ # the user-facing type to the storage type, before using
1640
+ # pandas.api.extensions.take
1641
+ raise AbstractMethodError(self)
1642
+
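# --- Editor's sketch, not part of the pandas source file: the two `allow_fill`
# --- modes described above, shown on a concrete masked Int64 array.  The helper
# --- name is hypothetical and for illustration only.
def _take_usage_sketch():
    import pandas as pd

    arr = pd.array([10, 20, 30])
    positional = arr.take([0, -1])                                   # -> [10, 30]
    filled_na = arr.take([0, -1], allow_fill=True)                   # -> [10, <NA>]
    filled_val = arr.take([0, -1], allow_fill=True, fill_value=99)   # -> [10, 99]
    return positional, filled_na, filled_val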
1643
+ def copy(self) -> Self:
1644
+ """
1645
+ Return a copy of the array.
1646
+
1647
+ Returns
1648
+ -------
1649
+ ExtensionArray
1650
+
1651
+ Examples
1652
+ --------
1653
+ >>> arr = pd.array([1, 2, 3])
1654
+ >>> arr2 = arr.copy()
1655
+ >>> arr[0] = 2
1656
+ >>> arr2
1657
+ <IntegerArray>
1658
+ [1, 2, 3]
1659
+ Length: 3, dtype: Int64
1660
+ """
1661
+ raise AbstractMethodError(self)
1662
+
1663
+ def view(self, dtype: Dtype | None = None) -> ArrayLike:
1664
+ """
1665
+ Return a view on the array.
1666
+
1667
+ Parameters
1668
+ ----------
1669
+ dtype : str, np.dtype, or ExtensionDtype, optional
1670
+ Default None.
1671
+
1672
+ Returns
1673
+ -------
1674
+ ExtensionArray or np.ndarray
1675
+ A view on the :class:`ExtensionArray`'s data.
1676
+
1677
+ Examples
1678
+ --------
1679
+ This gives a view on the underlying data of an ``ExtensionArray`` and is not a
1680
+ copy. Modifications on either the view or the original ``ExtensionArray``
1681
+ will be reflected in the underlying data:
1682
+
1683
+ >>> arr = pd.array([1, 2, 3])
1684
+ >>> arr2 = arr.view()
1685
+ >>> arr[0] = 2
1686
+ >>> arr2
1687
+ <IntegerArray>
1688
+ [2, 2, 3]
1689
+ Length: 3, dtype: Int64
1690
+ """
1691
+ # NB:
1692
+ # - This must return a *new* object referencing the same data, not self.
1693
+ # - The only case that *must* be implemented is with dtype=None,
1694
+ # giving a view with the same dtype as self.
1695
+ if dtype is not None:
1696
+ raise NotImplementedError(dtype)
1697
+ return self[:]
1698
+
1699
+ # ------------------------------------------------------------------------
1700
+ # Printing
1701
+ # ------------------------------------------------------------------------
1702
+
1703
+ def __repr__(self) -> str:
1704
+ if self.ndim > 1:
1705
+ return self._repr_2d()
1706
+
1707
+ from pandas.io.formats.printing import format_object_summary
1708
+
1709
+ # the short repr has no trailing newline, while the truncated
1710
+ # repr does. So we include a newline in our template, and strip
1711
+ # any trailing newlines from format_object_summary
1712
+ data = format_object_summary(
1713
+ self, self._formatter(), indent_for_name=False
1714
+ ).rstrip(", \n")
1715
+ class_name = f"<{type(self).__name__}>\n"
1716
+ footer = self._get_repr_footer()
1717
+ return f"{class_name}{data}\n{footer}"
1718
+
1719
+ def _get_repr_footer(self) -> str:
1720
+ # GH#24278
1721
+ if self.ndim > 1:
1722
+ return f"Shape: {self.shape}, dtype: {self.dtype}"
1723
+ return f"Length: {len(self)}, dtype: {self.dtype}"
1724
+
1725
+ def _repr_2d(self) -> str:
1726
+ from pandas.io.formats.printing import format_object_summary
1727
+
1728
+ # the short repr has no trailing newline, while the truncated
1729
+ # repr does. So we include a newline in our template, and strip
1730
+ # any trailing newlines from format_object_summary
1731
+ lines = [
1732
+ format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
1733
+ ", \n"
1734
+ )
1735
+ for x in self
1736
+ ]
1737
+ data = ",\n".join(lines)
1738
+ class_name = f"<{type(self).__name__}>"
1739
+ footer = self._get_repr_footer()
1740
+ return f"{class_name}\n[\n{data}\n]\n{footer}"
1741
+
1742
+ def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
1743
+ """
1744
+ Formatting function for scalar values.
1745
+
1746
+ This is used in the default '__repr__'. The returned formatting
1747
+ function receives instances of your scalar type.
1748
+
1749
+ Parameters
1750
+ ----------
1751
+ boxed : bool, default False
1752
+ An indicator of whether your array is being printed
1753
+ within a Series, DataFrame, or Index (True), or just by
1754
+ itself (False). This may be useful if you want scalar values
1755
+ to appear differently within a Series versus on its own (e.g.
1756
+ quoted or not).
1757
+
1758
+ Returns
1759
+ -------
1760
+ Callable[[Any], str]
1761
+ A callable that gets instances of the scalar type and
1762
+ returns a string. By default, :func:`repr` is used
1763
+ when ``boxed=False`` and :func:`str` is used when
1764
+ ``boxed=True``.
1765
+
1766
+ Examples
1767
+ --------
1768
+ >>> class MyExtensionArray(pd.arrays.NumpyExtensionArray):
1769
+ ... def _formatter(self, boxed=False):
1770
+ ... return lambda x: '*' + str(x) + '*' if boxed else repr(x) + '*'
1771
+ >>> MyExtensionArray(np.array([1, 2, 3, 4]))
1772
+ <MyExtensionArray>
1773
+ [1*, 2*, 3*, 4*]
1774
+ Length: 4, dtype: int64
1775
+ """
1776
+ if boxed:
1777
+ return str
1778
+ return repr
1779
+
1780
+ # ------------------------------------------------------------------------
1781
+ # Reshaping
1782
+ # ------------------------------------------------------------------------
1783
+
1784
+ def transpose(self, *axes: int) -> ExtensionArray:
1785
+ """
1786
+ Return a transposed view on this array.
1787
+
1788
+ Because ExtensionArrays are always 1D, this is a no-op. It is included
1789
+ for compatibility with np.ndarray.
1790
+
1791
+ Returns
1792
+ -------
1793
+ ExtensionArray
1794
+
1795
+ Examples
1796
+ --------
1797
+ >>> pd.array([1, 2, 3]).transpose()
1798
+ <IntegerArray>
1799
+ [1, 2, 3]
1800
+ Length: 3, dtype: Int64
1801
+ """
1802
+ return self[:]
1803
+
1804
+ @property
1805
+ def T(self) -> ExtensionArray:
1806
+ return self.transpose()
1807
+
1808
+ def ravel(self, order: Literal["C", "F", "A", "K"] | None = "C") -> ExtensionArray:
1809
+ """
1810
+ Return a flattened view on this array.
1811
+
1812
+ Parameters
1813
+ ----------
1814
+ order : {None, 'C', 'F', 'A', 'K'}, default 'C'
1815
+
1816
+ Returns
1817
+ -------
1818
+ ExtensionArray
1819
+
1820
+ Notes
1821
+ -----
1822
+ - Because ExtensionArrays are 1D-only, this is a no-op.
1823
+ - The "order" argument is ignored, is for compatibility with NumPy.
1824
+
1825
+ Examples
1826
+ --------
1827
+ >>> pd.array([1, 2, 3]).ravel()
1828
+ <IntegerArray>
1829
+ [1, 2, 3]
1830
+ Length: 3, dtype: Int64
1831
+ """
1832
+ return self
1833
+
1834
+ @classmethod
1835
+ def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:
1836
+ """
1837
+ Concatenate multiple arrays of this dtype.
1838
+
1839
+ Parameters
1840
+ ----------
1841
+ to_concat : sequence of this type
1842
+
1843
+ Returns
1844
+ -------
1845
+ ExtensionArray
1846
+
1847
+ Examples
1848
+ --------
1849
+ >>> arr1 = pd.array([1, 2, 3])
1850
+ >>> arr2 = pd.array([4, 5, 6])
1851
+ >>> pd.arrays.IntegerArray._concat_same_type([arr1, arr2])
1852
+ <IntegerArray>
1853
+ [1, 2, 3, 4, 5, 6]
1854
+ Length: 6, dtype: Int64
1855
+ """
1856
+ # Implementer note: this method will only be called with a sequence of
1857
+ # ExtensionArrays of this class and with the same dtype as self. This
1858
+ # should allow "easy" concatenation (no upcasting needed), and result
1859
+ # in a new ExtensionArray of the same dtype.
1860
+ # Note: this strict behaviour is only guaranteed starting with pandas 1.1
1861
+ raise AbstractMethodError(cls)
1862
+
1863
+ # The _can_hold_na attribute is set to True so that pandas internals
1864
+ # will use the ExtensionDtype.na_value as the NA value in operations
1865
+ # such as take(), reindex(), shift(), etc. In addition, those results
1866
+ # will then be of the ExtensionArray subclass rather than an array
1867
+ # of objects
1868
+ @cache_readonly
1869
+ def _can_hold_na(self) -> bool:
1870
+ return self.dtype._can_hold_na
1871
+
1872
+ def _accumulate(
1873
+ self, name: str, *, skipna: bool = True, **kwargs
1874
+ ) -> ExtensionArray:
1875
+ """
1876
+ Return an ExtensionArray performing an accumulation operation.
1877
+
1878
+ The underlying data type might change.
1879
+
1880
+ Parameters
1881
+ ----------
1882
+ name : str
1883
+ Name of the function, supported values are:
1884
+ - cummin
1885
+ - cummax
1886
+ - cumsum
1887
+ - cumprod
1888
+ skipna : bool, default True
1889
+ If True, skip NA values.
1890
+ **kwargs
1891
+ Additional keyword arguments passed to the accumulation function.
1892
+ Currently, there is no supported kwarg.
1893
+
1894
+ Returns
1895
+ -------
1896
+ array
1897
+
1898
+ Raises
1899
+ ------
1900
+ NotImplementedError : subclass does not define accumulations
1901
+
1902
+ Examples
1903
+ --------
1904
+ >>> arr = pd.array([1, 2, 3])
1905
+ >>> arr._accumulate(name='cumsum')
1906
+ <IntegerArray>
1907
+ [1, 3, 6]
1908
+ Length: 3, dtype: Int64
1909
+ """
1910
+ raise NotImplementedError(f"cannot perform {name} with type {self.dtype}")
1911
+
1912
+ def _reduce(
1913
+ self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
1914
+ ):
1915
+ """
1916
+ Return a scalar result of performing the reduction operation.
1917
+
1918
+ Parameters
1919
+ ----------
1920
+ name : str
1921
+ Name of the function, supported values are:
1922
+ { any, all, min, max, sum, mean, median, prod,
1923
+ std, var, sem, kurt, skew }.
1924
+ skipna : bool, default True
1925
+ If True, skip NaN values.
1926
+ keepdims : bool, default False
1927
+ If False, a scalar is returned.
1928
+ If True, the result has a dimension of size one along the reduced axis.
1929
+
1930
+ .. versionadded:: 2.1
1931
+
1932
+ This parameter is not required in the _reduce signature to keep backward
1933
+ compatibility, but will become required in the future. If the parameter
1934
+ is not found in the method signature, a FutureWarning will be emitted.
1935
+ **kwargs
1936
+ Additional keyword arguments passed to the reduction function.
1937
+ Currently, `ddof` is the only supported kwarg.
1938
+
1939
+ Returns
1940
+ -------
1941
+ scalar
1942
+
1943
+ Raises
1944
+ ------
1945
+ TypeError : subclass does not define reductions
1946
+
1947
+ Examples
1948
+ --------
1949
+ >>> pd.array([1, 2, 3])._reduce("min")
1950
+ 1
1951
+ """
1952
+ meth = getattr(self, name, None)
1953
+ if meth is None:
1954
+ raise TypeError(
1955
+ f"'{type(self).__name__}' with dtype {self.dtype} "
1956
+ f"does not support reduction '{name}'"
1957
+ )
1958
+ result = meth(skipna=skipna, **kwargs)
1959
+ if keepdims:
1960
+ result = np.array([result])
1961
+
1962
+ return result
1963
+
1964
+ # https://github.com/python/typeshed/issues/2148#issuecomment-520783318
1965
+ # Incompatible types in assignment (expression has type "None", base class
1966
+ # "object" defined the type as "Callable[[object], int]")
1967
+ __hash__: ClassVar[None] # type: ignore[assignment]
1968
+
1969
+ # ------------------------------------------------------------------------
1970
+ # Non-Optimized Default Methods; in the case of the private methods here,
1971
+ # these are not guaranteed to be stable across pandas versions.
1972
+
1973
+ def _values_for_json(self) -> np.ndarray:
1974
+ """
1975
+ Specify how to render our entries in to_json.
1976
+
1977
+ Notes
1978
+ -----
1979
+ The dtype on the returned ndarray is not restricted, but for non-native
1980
+ types that are not specifically handled in objToJSON.c, to_json is
1981
+ liable to raise. In these cases, it may be safer to return an ndarray
1982
+ of strings.
1983
+ """
1984
+ return np.asarray(self)
1985
+
1986
+ def _hash_pandas_object(
1987
+ self, *, encoding: str, hash_key: str, categorize: bool
1988
+ ) -> npt.NDArray[np.uint64]:
1989
+ """
1990
+ Hook for hash_pandas_object.
1991
+
1992
+ Default is to use the values returned by _values_for_factorize.
1993
+
1994
+ Parameters
1995
+ ----------
1996
+ encoding : str
1997
+ Encoding for data & key when strings.
1998
+ hash_key : str
1999
+ Hash_key for string key to encode.
2000
+ categorize : bool
2001
+ Whether to first categorize object arrays before hashing. This is more
2002
+ efficient when the array contains duplicate values.
2003
+
2004
+ Returns
2005
+ -------
2006
+ np.ndarray[uint64]
2007
+
2008
+ Examples
2009
+ --------
2010
+ >>> pd.array([1, 2])._hash_pandas_object(encoding='utf-8',
2011
+ ... hash_key="1000000000000000",
2012
+ ... categorize=False
2013
+ ... )
2014
+ array([ 6238072747940578789, 15839785061582574730], dtype=uint64)
2015
+ """
2016
+ from pandas.core.util.hashing import hash_array
2017
+
2018
+ values, _ = self._values_for_factorize()
2019
+ return hash_array(
2020
+ values, encoding=encoding, hash_key=hash_key, categorize=categorize
2021
+ )
2022
+
2023
+ def _explode(self) -> tuple[Self, npt.NDArray[np.uint64]]:
2024
+ """
2025
+ Transform each element of list-like to a row.
2026
+
2027
+ For arrays that do not contain list-like elements, the default
2028
+ implementation of this method just returns a copy and an array
2029
+ of ones (unchanged index).
2030
+
2031
+ Returns
2032
+ -------
2033
+ ExtensionArray
2034
+ Array with the exploded values.
2035
+ np.ndarray[uint64]
2036
+ The original lengths of each list-like for determining the
2037
+ resulting index.
2038
+
2039
+ See Also
2040
+ --------
2041
+ Series.explode : The method on the ``Series`` object that this
2042
+ extension array method is meant to support.
2043
+
2044
+ Examples
2045
+ --------
2046
+ >>> import pyarrow as pa
2047
+ >>> a = pd.array([[1, 2, 3], [4], [5, 6]],
2048
+ ... dtype=pd.ArrowDtype(pa.list_(pa.int64())))
2049
+ >>> a._explode()
2050
+ (<ArrowExtensionArray>
2051
+ [1, 2, 3, 4, 5, 6]
2052
+ Length: 6, dtype: int64[pyarrow], array([3, 1, 2], dtype=int32))
2053
+ """
2054
+ values = self.copy()
2055
+ counts = np.ones(shape=(len(self),), dtype=np.uint64)
2056
+ return values, counts
2057
+
2058
+ def tolist(self) -> list:
2059
+ """
2060
+ Return a list of the values.
2061
+
2062
+ These are each a scalar type, which is a Python scalar
2063
+ (for str, int, float) or a pandas scalar
2064
+ (for Timestamp/Timedelta/Interval/Period)
2065
+
2066
+ Returns
2067
+ -------
2068
+ list
2069
+
2070
+ Examples
2071
+ --------
2072
+ >>> arr = pd.array([1, 2, 3])
2073
+ >>> arr.tolist()
2074
+ [1, 2, 3]
2075
+ """
2076
+ if self.ndim > 1:
2077
+ return [x.tolist() for x in self]
2078
+ return list(self)
2079
+
2080
+ def delete(self, loc: PositionalIndexer) -> Self:
2081
+ indexer = np.delete(np.arange(len(self)), loc)
2082
+ return self.take(indexer)
2083
+
2084
+ def insert(self, loc: int, item) -> Self:
2085
+ """
2086
+ Insert an item at the given position.
2087
+
2088
+ Parameters
2089
+ ----------
2090
+ loc : int
2091
+ item : scalar-like
2092
+
2093
+ Returns
2094
+ -------
2095
+ same type as self
2096
+
2097
+ Notes
2098
+ -----
2099
+ This method should be both type and dtype-preserving. If the item
2100
+ cannot be held in an array of this type/dtype, either ValueError or
2101
+ TypeError should be raised.
2102
+
2103
+ The default implementation relies on _from_sequence to raise on invalid
2104
+ items.
2105
+
2106
+ Examples
2107
+ --------
2108
+ >>> arr = pd.array([1, 2, 3])
2109
+ >>> arr.insert(2, -1)
2110
+ <IntegerArray>
2111
+ [1, 2, -1, 3]
2112
+ Length: 4, dtype: Int64
2113
+ """
2114
+ loc = validate_insert_loc(loc, len(self))
2115
+
2116
+ item_arr = type(self)._from_sequence([item], dtype=self.dtype)
2117
+
2118
+ return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]])
2119
+
2120
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
2121
+ """
2122
+ Analogue to np.putmask(self, mask, value)
2123
+
2124
+ Parameters
2125
+ ----------
2126
+ mask : np.ndarray[bool]
2127
+ value : scalar or listlike
2128
+ If listlike, must be arraylike with same length as self.
2129
+
2130
+ Returns
2131
+ -------
2132
+ None
2133
+
2134
+ Notes
2135
+ -----
2136
+ Unlike np.putmask, we do not repeat listlike values with mismatched length.
2137
+ 'value' should either be a scalar or an arraylike with the same length
2138
+ as self.
2139
+ """
2140
+ if is_list_like(value):
2141
+ val = value[mask]
2142
+ else:
2143
+ val = value
2144
+
2145
+ self[mask] = val
2146
+
2147
+ def _where(self, mask: npt.NDArray[np.bool_], value) -> Self:
2148
+ """
2149
+ Analogue to np.where(mask, self, value)
2150
+
2151
+ Parameters
2152
+ ----------
2153
+ mask : np.ndarray[bool]
2154
+ value : scalar or listlike
2155
+
2156
+ Returns
2157
+ -------
2158
+ same type as self
2159
+ """
2160
+ result = self.copy()
2161
+
2162
+ if is_list_like(value):
2163
+ val = value[~mask]
2164
+ else:
2165
+ val = value
2166
+
2167
+ result[~mask] = val
2168
+ return result
2169
+
2170
+ # TODO(3.0): this can be removed once GH#33302 deprecation is enforced
2171
+ def _fill_mask_inplace(
2172
+ self, method: str, limit: int | None, mask: npt.NDArray[np.bool_]
2173
+ ) -> None:
2174
+ """
2175
+ Replace values in locations specified by 'mask' using pad or backfill.
2176
+
2177
+ See also
2178
+ --------
2179
+ ExtensionArray.fillna
2180
+ """
2181
+ func = missing.get_fill_func(method)
2182
+ npvalues = self.astype(object)
2183
+ # NB: if we don't copy mask here, it may be altered inplace, which
2184
+ # would mess up the `self[mask] = ...` below.
2185
+ func(npvalues, limit=limit, mask=mask.copy())
2186
+ new_values = self._from_sequence(npvalues, dtype=self.dtype)
2187
+ self[mask] = new_values[mask]
2188
+
2189
+ def _rank(
2190
+ self,
2191
+ *,
2192
+ axis: AxisInt = 0,
2193
+ method: str = "average",
2194
+ na_option: str = "keep",
2195
+ ascending: bool = True,
2196
+ pct: bool = False,
2197
+ ):
2198
+ """
2199
+ See Series.rank.__doc__.
2200
+ """
2201
+ if axis != 0:
2202
+ raise NotImplementedError
2203
+
2204
+ return rank(
2205
+ self._values_for_argsort(),
2206
+ axis=axis,
2207
+ method=method,
2208
+ na_option=na_option,
2209
+ ascending=ascending,
2210
+ pct=pct,
2211
+ )
2212
+
2213
+ @classmethod
2214
+ def _empty(cls, shape: Shape, dtype: ExtensionDtype):
2215
+ """
2216
+ Create an ExtensionArray with the given shape and dtype.
2217
+
2218
+ See also
2219
+ --------
2220
+ ExtensionDtype.empty
2221
+ ExtensionDtype.empty is the 'official' public version of this API.
2222
+ """
2223
+ # Implementer note: while ExtensionDtype.empty is the public way to
2224
+ # call this method, it is still required to implement this `_empty`
2225
+ # method as well (it is called internally in pandas)
2226
+ obj = cls._from_sequence([], dtype=dtype)
2227
+
2228
+ taker = np.broadcast_to(np.intp(-1), shape)
2229
+ result = obj.take(taker, allow_fill=True)
2230
+ if not isinstance(result, cls) or dtype != result.dtype:
2231
+ raise NotImplementedError(
2232
+ f"Default 'empty' implementation is invalid for dtype='{dtype}'"
2233
+ )
2234
+ return result
2235
+
2236
+ def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self:
2237
+ """
2238
+ Compute the quantiles of self for each quantile in `qs`.
2239
+
2240
+ Parameters
2241
+ ----------
2242
+ qs : np.ndarray[float64]
2243
+ interpolation: str
2244
+
2245
+ Returns
2246
+ -------
2247
+ same type as self
2248
+ """
2249
+ mask = np.asarray(self.isna())
2250
+ arr = np.asarray(self)
2251
+ fill_value = np.nan
2252
+
2253
+ res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)
2254
+ return type(self)._from_sequence(res_values)
2255
+
2256
+ def _mode(self, dropna: bool = True) -> Self:
2257
+ """
2258
+ Returns the mode(s) of the ExtensionArray.
2259
+
2260
+ Always returns `ExtensionArray` even if only one value.
2261
+
2262
+ Parameters
2263
+ ----------
2264
+ dropna : bool, default True
2265
+ Don't consider counts of NA values.
2266
+
2267
+ Returns
2268
+ -------
2269
+ same type as self
2270
+ Sorted, if possible.
2271
+ """
2272
+ # error: Incompatible return value type (got "Union[ExtensionArray,
2273
+ # ndarray[Any, Any]]", expected "Self")
2274
+ return mode(self, dropna=dropna) # type: ignore[return-value]
2275
+
2276
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
2277
+ if any(
2278
+ isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)) for other in inputs
2279
+ ):
2280
+ return NotImplemented
2281
+
2282
+ result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
2283
+ self, ufunc, method, *inputs, **kwargs
2284
+ )
2285
+ if result is not NotImplemented:
2286
+ return result
2287
+
2288
+ if "out" in kwargs:
2289
+ return arraylike.dispatch_ufunc_with_out(
2290
+ self, ufunc, method, *inputs, **kwargs
2291
+ )
2292
+
2293
+ if method == "reduce":
2294
+ result = arraylike.dispatch_reduction_ufunc(
2295
+ self, ufunc, method, *inputs, **kwargs
2296
+ )
2297
+ if result is not NotImplemented:
2298
+ return result
2299
+
2300
+ return arraylike.default_array_ufunc(self, ufunc, method, *inputs, **kwargs)
2301
+
2302
+ def map(self, mapper, na_action=None):
2303
+ """
2304
+ Map values using an input mapping or function.
2305
+
2306
+ Parameters
2307
+ ----------
2308
+ mapper : function, dict, or Series
2309
+ Mapping correspondence.
2310
+ na_action : {None, 'ignore'}, default None
2311
+ If 'ignore', propagate NA values, without passing them to the
2312
+ mapping correspondence. If 'ignore' is not supported, a
2313
+ ``NotImplementedError`` should be raised.
2314
+
2315
+ Returns
2316
+ -------
2317
+ Union[ndarray, Index, ExtensionArray]
2318
+ The output of the mapping function applied to the array.
2319
+ If the function returns a tuple with more than one element
2320
+ a MultiIndex will be returned.
2321
+ """
2322
+ return map_array(self, mapper, na_action=na_action)
2323
+
2324
+ # ------------------------------------------------------------------------
2325
+ # GroupBy Methods
2326
+
2327
+ def _groupby_op(
2328
+ self,
2329
+ *,
2330
+ how: str,
2331
+ has_dropped_na: bool,
2332
+ min_count: int,
2333
+ ngroups: int,
2334
+ ids: npt.NDArray[np.intp],
2335
+ **kwargs,
2336
+ ) -> ArrayLike:
2337
+ """
2338
+ Dispatch GroupBy reduction or transformation operation.
2339
+
2340
+ This is an *experimental* API to allow ExtensionArray authors to implement
2341
+ reductions and transformations. The API is subject to change.
2342
+
2343
+ Parameters
2344
+ ----------
2345
+ how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median',
2346
+ 'var', 'std', 'sem', 'nth', 'last', 'ohlc',
2347
+ 'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'}
2348
+ has_dropped_na : bool
2349
+ min_count : int
2350
+ ngroups : int
2351
+ ids : np.ndarray[np.intp]
2352
+ ids[i] gives the integer label for the group that self[i] belongs to.
2353
+ **kwargs : operation-specific
2354
+ 'any', 'all' -> ['skipna']
2355
+ 'var', 'std', 'sem' -> ['ddof']
2356
+ 'cumprod', 'cumsum', 'cummin', 'cummax' -> ['skipna']
2357
+ 'rank' -> ['ties_method', 'ascending', 'na_option', 'pct']
2358
+
2359
+ Returns
2360
+ -------
2361
+ np.ndarray or ExtensionArray
2362
+ """
2363
+ from pandas.core.arrays.string_ import StringDtype
2364
+ from pandas.core.groupby.ops import WrappedCythonOp
2365
+
2366
+ kind = WrappedCythonOp.get_kind_from_how(how)
2367
+ op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
2368
+
2369
+ # GH#43682
2370
+ if isinstance(self.dtype, StringDtype):
2371
+ # StringArray
2372
+ if op.how not in ["any", "all"]:
2373
+ # Fail early to avoid conversion to object
2374
+ op._get_cython_function(op.kind, op.how, np.dtype(object), False)
2375
+ npvalues = self.to_numpy(object, na_value=np.nan)
2376
+ else:
2377
+ raise NotImplementedError(
2378
+ f"function is not implemented for this dtype: {self.dtype}"
2379
+ )
2380
+
2381
+ res_values = op._cython_op_ndim_compat(
2382
+ npvalues,
2383
+ min_count=min_count,
2384
+ ngroups=ngroups,
2385
+ comp_ids=ids,
2386
+ mask=None,
2387
+ **kwargs,
2388
+ )
2389
+
2390
+ if op.how in op.cast_blocklist:
2391
+ # i.e. how in ["rank"], since other cast_blocklist methods don't go
2392
+ # through cython_operation
2393
+ return res_values
2394
+
2395
+ if isinstance(self.dtype, StringDtype):
2396
+ dtype = self.dtype
2397
+ string_array_cls = dtype.construct_array_type()
2398
+ return string_array_cls._from_sequence(res_values, dtype=dtype)
2399
+
2400
+ else:
2401
+ raise NotImplementedError
2402
+
2403
+
2404
+ class ExtensionArraySupportsAnyAll(ExtensionArray):
2405
+ def any(self, *, skipna: bool = True) -> bool:
2406
+ raise AbstractMethodError(self)
2407
+
2408
+ def all(self, *, skipna: bool = True) -> bool:
2409
+ raise AbstractMethodError(self)
2410
+
2411
+
2412
+ class ExtensionOpsMixin:
2413
+ """
2414
+ A base class for linking the operators to their dunder names.
2415
+
2416
+ .. note::
2417
+
2418
+ You may want to set ``__array_priority__`` if you want your
2419
+ implementation to be called when involved in binary operations
2420
+ with NumPy arrays.
2421
+ """
2422
+
2423
+ @classmethod
2424
+ def _create_arithmetic_method(cls, op):
2425
+ raise AbstractMethodError(cls)
2426
+
2427
+ @classmethod
2428
+ def _add_arithmetic_ops(cls) -> None:
2429
+ setattr(cls, "__add__", cls._create_arithmetic_method(operator.add))
2430
+ setattr(cls, "__radd__", cls._create_arithmetic_method(roperator.radd))
2431
+ setattr(cls, "__sub__", cls._create_arithmetic_method(operator.sub))
2432
+ setattr(cls, "__rsub__", cls._create_arithmetic_method(roperator.rsub))
2433
+ setattr(cls, "__mul__", cls._create_arithmetic_method(operator.mul))
2434
+ setattr(cls, "__rmul__", cls._create_arithmetic_method(roperator.rmul))
2435
+ setattr(cls, "__pow__", cls._create_arithmetic_method(operator.pow))
2436
+ setattr(cls, "__rpow__", cls._create_arithmetic_method(roperator.rpow))
2437
+ setattr(cls, "__mod__", cls._create_arithmetic_method(operator.mod))
2438
+ setattr(cls, "__rmod__", cls._create_arithmetic_method(roperator.rmod))
2439
+ setattr(cls, "__floordiv__", cls._create_arithmetic_method(operator.floordiv))
2440
+ setattr(
2441
+ cls, "__rfloordiv__", cls._create_arithmetic_method(roperator.rfloordiv)
2442
+ )
2443
+ setattr(cls, "__truediv__", cls._create_arithmetic_method(operator.truediv))
2444
+ setattr(cls, "__rtruediv__", cls._create_arithmetic_method(roperator.rtruediv))
2445
+ setattr(cls, "__divmod__", cls._create_arithmetic_method(divmod))
2446
+ setattr(cls, "__rdivmod__", cls._create_arithmetic_method(roperator.rdivmod))
2447
+
2448
+ @classmethod
2449
+ def _create_comparison_method(cls, op):
2450
+ raise AbstractMethodError(cls)
2451
+
2452
+ @classmethod
2453
+ def _add_comparison_ops(cls) -> None:
2454
+ setattr(cls, "__eq__", cls._create_comparison_method(operator.eq))
2455
+ setattr(cls, "__ne__", cls._create_comparison_method(operator.ne))
2456
+ setattr(cls, "__lt__", cls._create_comparison_method(operator.lt))
2457
+ setattr(cls, "__gt__", cls._create_comparison_method(operator.gt))
2458
+ setattr(cls, "__le__", cls._create_comparison_method(operator.le))
2459
+ setattr(cls, "__ge__", cls._create_comparison_method(operator.ge))
2460
+
2461
+ @classmethod
2462
+ def _create_logical_method(cls, op):
2463
+ raise AbstractMethodError(cls)
2464
+
2465
+ @classmethod
2466
+ def _add_logical_ops(cls) -> None:
2467
+ setattr(cls, "__and__", cls._create_logical_method(operator.and_))
2468
+ setattr(cls, "__rand__", cls._create_logical_method(roperator.rand_))
2469
+ setattr(cls, "__or__", cls._create_logical_method(operator.or_))
2470
+ setattr(cls, "__ror__", cls._create_logical_method(roperator.ror_))
2471
+ setattr(cls, "__xor__", cls._create_logical_method(operator.xor))
2472
+ setattr(cls, "__rxor__", cls._create_logical_method(roperator.rxor))
2473
+
2474
+
2475
+ class ExtensionScalarOpsMixin(ExtensionOpsMixin):
2476
+ """
2477
+ A mixin for defining ops on an ExtensionArray.
2478
+
2479
+ It is assumed that the underlying scalar objects have the operators
2480
+ already defined.
2481
+
2482
+ Notes
2483
+ -----
2484
+ If you have defined a subclass MyExtensionArray(ExtensionArray), then
2485
+ use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
2486
+ get the arithmetic operators. After the definition of MyExtensionArray,
2487
+ insert the lines
2488
+
2489
+ MyExtensionArray._add_arithmetic_ops()
2490
+ MyExtensionArray._add_comparison_ops()
2491
+
2492
+ to link the operators to your class.
2493
+
2494
+ .. note::
2495
+
2496
+ You may want to set ``__array_priority__`` if you want your
2497
+ implementation to be called when involved in binary operations
2498
+ with NumPy arrays.
2499
+ """
2500
+
2501
+ @classmethod
2502
+ def _create_method(cls, op, coerce_to_dtype: bool = True, result_dtype=None):
2503
+ """
2504
+ A class method that returns a method that will correspond to an
2505
+ operator for an ExtensionArray subclass, by dispatching to the
2506
+ relevant operator defined on the individual elements of the
2507
+ ExtensionArray.
2508
+
2509
+ Parameters
2510
+ ----------
2511
+ op : function
2512
+ An operator that takes arguments op(a, b)
2513
+ coerce_to_dtype : bool, default True
2514
+ boolean indicating whether to attempt to convert
2515
+ the result to the underlying ExtensionArray dtype.
2516
+ If it's not possible to create a new ExtensionArray with the
2517
+ values, an ndarray is returned instead.
2518
+
2519
+ Returns
2520
+ -------
2521
+ Callable[[Any, Any], Union[ndarray, ExtensionArray]]
2522
+ A method that can be bound to a class. When used, the method
2523
+ receives the two arguments, one of which is the instance of
2524
+ this class, and should return an ExtensionArray or an ndarray.
2525
+
2526
+ Returning an ndarray may be necessary when the result of the
2527
+ `op` cannot be stored in the ExtensionArray. The dtype of the
2528
+ ndarray uses NumPy's normal inference rules.
2529
+
2530
+ Examples
2531
+ --------
2532
+ Given an ExtensionArray subclass called MyExtensionArray, use
2533
+
2534
+ __add__ = cls._create_method(operator.add)
2535
+
2536
+ in the class definition of MyExtensionArray to create the operator
2537
+ for addition, that will be based on the operator implementation
2538
+ of the underlying elements of the ExtensionArray
2539
+ """
2540
+
2541
+ def _binop(self, other):
2542
+ def convert_values(param):
2543
+ if isinstance(param, ExtensionArray) or is_list_like(param):
2544
+ ovalues = param
2545
+ else: # Assume it's an object
2546
+ ovalues = [param] * len(self)
2547
+ return ovalues
2548
+
2549
+ if isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)):
2550
+ # rely on pandas to unbox and dispatch to us
2551
+ return NotImplemented
2552
+
2553
+ lvalues = self
2554
+ rvalues = convert_values(other)
2555
+
2556
+ # If the operator is not defined for the underlying objects,
2557
+ # a TypeError should be raised
2558
+ res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
2559
+
2560
+ def _maybe_convert(arr):
2561
+ if coerce_to_dtype:
2562
+ # https://github.com/pandas-dev/pandas/issues/22850
2563
+ # We catch all regular exceptions here, and fall back
2564
+ # to an ndarray.
2565
+ res = maybe_cast_pointwise_result(arr, self.dtype, same_dtype=False)
2566
+ if not isinstance(res, type(self)):
2567
+ # exception raised in _from_sequence; ensure we have ndarray
2568
+ res = np.asarray(arr)
2569
+ else:
2570
+ res = np.asarray(arr, dtype=result_dtype)
2571
+ return res
2572
+
2573
+ if op.__name__ in {"divmod", "rdivmod"}:
2574
+ a, b = zip(*res)
2575
+ return _maybe_convert(a), _maybe_convert(b)
2576
+
2577
+ return _maybe_convert(res)
2578
+
2579
+ op_name = f"__{op.__name__}__"
2580
+ return set_function_name(_binop, op_name, cls)
2581
+
2582
+ @classmethod
2583
+ def _create_arithmetic_method(cls, op):
2584
+ return cls._create_method(op)
2585
+
2586
+ @classmethod
2587
+ def _create_comparison_method(cls, op):
2588
+ return cls._create_method(op, coerce_to_dtype=False, result_dtype=bool)
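A minimal sketch of how a third-party ExtensionArray subclass would wire up these operators, following the ExtensionScalarOpsMixin docstring above (MyExtensionArray is a hypothetical name; the required ExtensionArray interface methods are elided):

    from pandas.api.extensions import ExtensionArray, ExtensionScalarOpsMixin

    class MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin):
        # _from_sequence, __len__, __getitem__, dtype, isna, take, copy, ...
        # would be implemented here; omitted in this sketch.
        pass

    # Bind the dunder methods after the class definition; each one is produced
    # by _create_method and dispatches element-wise to the underlying scalars.
    MyExtensionArray._add_arithmetic_ops()
    MyExtensionArray._add_comparison_ops()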
venv/lib/python3.10/site-packages/pandas/core/arrays/categorical.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py ADDED
@@ -0,0 +1,2556 @@
1
+ from __future__ import annotations
2
+
3
+ from datetime import (
4
+ datetime,
5
+ timedelta,
6
+ )
7
+ from functools import wraps
8
+ import operator
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Callable,
13
+ Literal,
14
+ Union,
15
+ cast,
16
+ final,
17
+ overload,
18
+ )
19
+ import warnings
20
+
21
+ import numpy as np
22
+
23
+ from pandas._libs import (
24
+ algos,
25
+ lib,
26
+ )
27
+ from pandas._libs.arrays import NDArrayBacked
28
+ from pandas._libs.tslibs import (
29
+ BaseOffset,
30
+ IncompatibleFrequency,
31
+ NaT,
32
+ NaTType,
33
+ Period,
34
+ Resolution,
35
+ Tick,
36
+ Timedelta,
37
+ Timestamp,
38
+ add_overflowsafe,
39
+ astype_overflowsafe,
40
+ get_unit_from_dtype,
41
+ iNaT,
42
+ ints_to_pydatetime,
43
+ ints_to_pytimedelta,
44
+ periods_per_day,
45
+ to_offset,
46
+ )
47
+ from pandas._libs.tslibs.fields import (
48
+ RoundTo,
49
+ round_nsint64,
50
+ )
51
+ from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
52
+ from pandas._libs.tslibs.timedeltas import get_unit_for_round
53
+ from pandas._libs.tslibs.timestamps import integer_op_not_supported
54
+ from pandas._typing import (
55
+ ArrayLike,
56
+ AxisInt,
57
+ DatetimeLikeScalar,
58
+ Dtype,
59
+ DtypeObj,
60
+ F,
61
+ InterpolateOptions,
62
+ NpDtype,
63
+ PositionalIndexer2D,
64
+ PositionalIndexerTuple,
65
+ ScalarIndexer,
66
+ Self,
67
+ SequenceIndexer,
68
+ TimeAmbiguous,
69
+ TimeNonexistent,
70
+ npt,
71
+ )
72
+ from pandas.compat.numpy import function as nv
73
+ from pandas.errors import (
74
+ AbstractMethodError,
75
+ InvalidComparison,
76
+ PerformanceWarning,
77
+ )
78
+ from pandas.util._decorators import (
79
+ Appender,
80
+ Substitution,
81
+ cache_readonly,
82
+ )
83
+ from pandas.util._exceptions import find_stack_level
84
+
85
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
86
+ from pandas.core.dtypes.common import (
87
+ is_all_strings,
88
+ is_integer_dtype,
89
+ is_list_like,
90
+ is_object_dtype,
91
+ is_string_dtype,
92
+ pandas_dtype,
93
+ )
94
+ from pandas.core.dtypes.dtypes import (
95
+ ArrowDtype,
96
+ CategoricalDtype,
97
+ DatetimeTZDtype,
98
+ ExtensionDtype,
99
+ PeriodDtype,
100
+ )
101
+ from pandas.core.dtypes.generic import (
102
+ ABCCategorical,
103
+ ABCMultiIndex,
104
+ )
105
+ from pandas.core.dtypes.missing import (
106
+ is_valid_na_for_dtype,
107
+ isna,
108
+ )
109
+
110
+ from pandas.core import (
111
+ algorithms,
112
+ missing,
113
+ nanops,
114
+ ops,
115
+ )
116
+ from pandas.core.algorithms import (
117
+ isin,
118
+ map_array,
119
+ unique1d,
120
+ )
121
+ from pandas.core.array_algos import datetimelike_accumulations
122
+ from pandas.core.arraylike import OpsMixin
123
+ from pandas.core.arrays._mixins import (
124
+ NDArrayBackedExtensionArray,
125
+ ravel_compat,
126
+ )
127
+ from pandas.core.arrays.arrow.array import ArrowExtensionArray
128
+ from pandas.core.arrays.base import ExtensionArray
129
+ from pandas.core.arrays.integer import IntegerArray
130
+ import pandas.core.common as com
131
+ from pandas.core.construction import (
132
+ array as pd_array,
133
+ ensure_wrapped_if_datetimelike,
134
+ extract_array,
135
+ )
136
+ from pandas.core.indexers import (
137
+ check_array_indexer,
138
+ check_setitem_lengths,
139
+ )
140
+ from pandas.core.ops.common import unpack_zerodim_and_defer
141
+ from pandas.core.ops.invalid import (
142
+ invalid_comparison,
143
+ make_invalid_op,
144
+ )
145
+
146
+ from pandas.tseries import frequencies
147
+
148
+ if TYPE_CHECKING:
149
+ from collections.abc import (
150
+ Iterator,
151
+ Sequence,
152
+ )
153
+
154
+ from pandas import Index
155
+ from pandas.core.arrays import (
156
+ DatetimeArray,
157
+ PeriodArray,
158
+ TimedeltaArray,
159
+ )
160
+
161
+ DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType]
162
+
163
+
164
+ def _make_unpacked_invalid_op(op_name: str):
165
+ op = make_invalid_op(op_name)
166
+ return unpack_zerodim_and_defer(op_name)(op)
167
+
168
+
169
+ def _period_dispatch(meth: F) -> F:
170
+ """
171
+ For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results
172
+ in PeriodArray. We cannot use ._ndarray directly for the affected
173
+ methods because the i8 data has different semantics on NaT values.
174
+ """
175
+
176
+ @wraps(meth)
177
+ def new_meth(self, *args, **kwargs):
178
+ if not isinstance(self.dtype, PeriodDtype):
179
+ return meth(self, *args, **kwargs)
180
+
181
+ arr = self.view("M8[ns]")
182
+ result = meth(arr, *args, **kwargs)
183
+ if result is NaT:
184
+ return NaT
185
+ elif isinstance(result, Timestamp):
186
+ return self._box_func(result._value)
187
+
188
+ res_i8 = result.view("i8")
189
+ return self._from_backing_data(res_i8)
190
+
191
+ return cast(F, new_meth)
192
+
193
+
194
+ # error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
195
+ # incompatible with definition in base class "ExtensionArray"
196
+ class DatetimeLikeArrayMixin( # type: ignore[misc]
197
+ OpsMixin, NDArrayBackedExtensionArray
198
+ ):
199
+ """
200
+ Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
201
+
202
+ Assumes that __new__/__init__ defines:
203
+ _ndarray
204
+
205
+ and that inheriting subclass implements:
206
+ freq
207
+ """
208
+
209
+ # _infer_matches -> which infer_dtype strings are close enough to our own
210
+ _infer_matches: tuple[str, ...]
211
+ _is_recognized_dtype: Callable[[DtypeObj], bool]
212
+ _recognized_scalars: tuple[type, ...]
213
+ _ndarray: np.ndarray
214
+ freq: BaseOffset | None
215
+
216
+ @cache_readonly
217
+ def _can_hold_na(self) -> bool:
218
+ return True
219
+
220
+ def __init__(
221
+ self, data, dtype: Dtype | None = None, freq=None, copy: bool = False
222
+ ) -> None:
223
+ raise AbstractMethodError(self)
224
+
225
+ @property
226
+ def _scalar_type(self) -> type[DatetimeLikeScalar]:
227
+ """
228
+ The scalar associated with this datelike
229
+
230
+ * PeriodArray : Period
231
+ * DatetimeArray : Timestamp
232
+ * TimedeltaArray : Timedelta
233
+ """
234
+ raise AbstractMethodError(self)
235
+
236
+ def _scalar_from_string(self, value: str) -> DTScalarOrNaT:
237
+ """
238
+ Construct a scalar type from a string.
239
+
240
+ Parameters
241
+ ----------
242
+ value : str
243
+
244
+ Returns
245
+ -------
246
+ Period, Timestamp, or Timedelta, or NaT
247
+ Whatever the type of ``self._scalar_type`` is.
248
+
249
+ Notes
250
+ -----
251
+ This should call ``self._check_compatible_with`` before
252
+ unboxing the result.
253
+ """
254
+ raise AbstractMethodError(self)
255
+
256
+ def _unbox_scalar(
257
+ self, value: DTScalarOrNaT
258
+ ) -> np.int64 | np.datetime64 | np.timedelta64:
259
+ """
260
+ Unbox the integer value of a scalar `value`.
261
+
262
+ Parameters
263
+ ----------
264
+ value : Period, Timestamp, Timedelta, or NaT
265
+ Depending on subclass.
266
+
267
+ Returns
268
+ -------
269
+ int
270
+
271
+ Examples
272
+ --------
273
+ >>> arr = pd.array(np.array(['1970-01-01'], 'datetime64[ns]'))
274
+ >>> arr._unbox_scalar(arr[0])
275
+ numpy.datetime64('1970-01-01T00:00:00.000000000')
276
+ """
277
+ raise AbstractMethodError(self)
278
+
279
+ def _check_compatible_with(self, other: DTScalarOrNaT) -> None:
280
+ """
281
+ Verify that `self` and `other` are compatible.
282
+
283
+ * DatetimeArray verifies that the timezones (if any) match
284
+ * PeriodArray verifies that the freq matches
285
+ * Timedelta has no verification
286
+
287
+ In each case, NaT is considered compatible.
288
+
289
+ Parameters
290
+ ----------
291
+ other
292
+
293
+ Raises
294
+ ------
295
+ Exception
296
+ """
297
+ raise AbstractMethodError(self)
298
+
299
+ # ------------------------------------------------------------------
300
+
301
+ def _box_func(self, x):
302
+ """
303
+ box function to get object from internal representation
304
+ """
305
+ raise AbstractMethodError(self)
306
+
307
+ def _box_values(self, values) -> np.ndarray:
308
+ """
309
+ apply box func to passed values
310
+ """
311
+ return lib.map_infer(values, self._box_func, convert=False)
312
+
313
+ def __iter__(self) -> Iterator:
314
+ if self.ndim > 1:
315
+ return (self[n] for n in range(len(self)))
316
+ else:
317
+ return (self._box_func(v) for v in self.asi8)
318
+
319
+ @property
320
+ def asi8(self) -> npt.NDArray[np.int64]:
321
+ """
322
+ Integer representation of the values.
323
+
324
+ Returns
325
+ -------
326
+ ndarray
327
+ An ndarray with int64 dtype.
328
+ """
329
+ # do not cache or you'll create a memory leak
330
+ return self._ndarray.view("i8")
331
+
332
+ # ----------------------------------------------------------------
333
+ # Rendering Methods
334
+
335
+ def _format_native_types(
336
+ self, *, na_rep: str | float = "NaT", date_format=None
337
+ ) -> npt.NDArray[np.object_]:
338
+ """
339
+ Helper method for astype when converting to strings.
340
+
341
+ Returns
342
+ -------
343
+ ndarray[str]
344
+ """
345
+ raise AbstractMethodError(self)
346
+
347
+ def _formatter(self, boxed: bool = False):
348
+ # TODO: Remove Datetime & DatetimeTZ formatters.
349
+ return "'{}'".format
350
+
351
+ # ----------------------------------------------------------------
352
+ # Array-Like / EA-Interface Methods
353
+
354
+ def __array__(
355
+ self, dtype: NpDtype | None = None, copy: bool | None = None
356
+ ) -> np.ndarray:
357
+ # used for Timedelta/DatetimeArray, overwritten by PeriodArray
358
+ if is_object_dtype(dtype):
359
+ return np.array(list(self), dtype=object)
360
+ return self._ndarray
361
+
362
+ @overload
363
+ def __getitem__(self, item: ScalarIndexer) -> DTScalarOrNaT:
364
+ ...
365
+
366
+ @overload
367
+ def __getitem__(
368
+ self,
369
+ item: SequenceIndexer | PositionalIndexerTuple,
370
+ ) -> Self:
371
+ ...
372
+
373
+ def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT:
374
+ """
375
+ This getitem defers to the underlying array, which by-definition can
376
+ only handle list-likes, slices, and integer scalars
377
+ """
378
+ # Use cast as we know we will get back a DatetimeLikeArray or DTScalar,
379
+ # but skip evaluating the Union at runtime for performance
380
+ # (see https://github.com/pandas-dev/pandas/pull/44624)
381
+ result = cast("Union[Self, DTScalarOrNaT]", super().__getitem__(key))
382
+ if lib.is_scalar(result):
383
+ return result
384
+ else:
385
+ # At this point we know the result is an array.
386
+ result = cast(Self, result)
387
+ result._freq = self._get_getitem_freq(key)
388
+ return result
389
+
390
+ def _get_getitem_freq(self, key) -> BaseOffset | None:
391
+ """
392
+ Find the `freq` attribute to assign to the result of a __getitem__ lookup.
393
+ """
394
+ is_period = isinstance(self.dtype, PeriodDtype)
395
+ if is_period:
396
+ freq = self.freq
397
+ elif self.ndim != 1:
398
+ freq = None
399
+ else:
400
+ key = check_array_indexer(self, key) # maybe ndarray[bool] -> slice
401
+ freq = None
402
+ if isinstance(key, slice):
403
+ if self.freq is not None and key.step is not None:
404
+ freq = key.step * self.freq
405
+ else:
406
+ freq = self.freq
407
+ elif key is Ellipsis:
408
+ # GH#21282 indexing with Ellipsis is similar to a full slice,
409
+ # should preserve `freq` attribute
410
+ freq = self.freq
411
+ elif com.is_bool_indexer(key):
412
+ new_key = lib.maybe_booleans_to_slice(key.view(np.uint8))
413
+ if isinstance(new_key, slice):
414
+ return self._get_getitem_freq(new_key)
415
+ return freq
416
+
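A short illustration of the freq propagation handled above, seen at the public DatetimeIndex level (assuming a recent pandas; reprs may vary slightly by version):

    import pandas as pd

    dti = pd.date_range("2024-01-01", periods=6, freq="D")
    dti[::2].freq        # step slicing scales the freq, here to <2 * Days>
    dti[::-1].freq       # a negative step flips it: <-1 * Days>
    dti[[0, 2, 4]].freq  # fancy integer indexing cannot preserve the freq -> None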
417
+ # error: Argument 1 of "__setitem__" is incompatible with supertype
418
+ # "ExtensionArray"; supertype defines the argument type as "Union[int,
419
+ # ndarray]"
420
+ def __setitem__(
421
+ self,
422
+ key: int | Sequence[int] | Sequence[bool] | slice,
423
+ value: NaTType | Any | Sequence[Any],
424
+ ) -> None:
425
+ # I'm fudging the types a bit here. "Any" above really depends
426
+ # on type(self). For PeriodArray, it's Period (or stuff coercible
427
+ # to a period in from_sequence). For DatetimeArray, it's Timestamp...
428
+ # I don't know if mypy can do that, possibly with Generics.
429
+ # https://mypy.readthedocs.io/en/latest/generics.html
430
+
431
+ no_op = check_setitem_lengths(key, value, self)
432
+
433
+ # Calling super() before the no_op short-circuit means that we raise
434
+ # on invalid 'value' even if this is a no-op, e.g. wrong-dtype empty array.
435
+ super().__setitem__(key, value)
436
+
437
+ if no_op:
438
+ return
439
+
440
+ self._maybe_clear_freq()
441
+
442
+ def _maybe_clear_freq(self) -> None:
443
+ # inplace operations like __setitem__ may invalidate the freq of
444
+ # DatetimeArray and TimedeltaArray
445
+ pass
446
+
447
+ def astype(self, dtype, copy: bool = True):
448
+ # Some notes on cases we don't have to handle here in the base class:
449
+ # 1. PeriodArray.astype handles period -> period
450
+ # 2. DatetimeArray.astype handles conversion between tz.
451
+ # 3. DatetimeArray.astype handles datetime -> period
452
+ dtype = pandas_dtype(dtype)
453
+
454
+ if dtype == object:
455
+ if self.dtype.kind == "M":
456
+ self = cast("DatetimeArray", self)
457
+ # *much* faster than self._box_values
458
+ # for e.g. test_get_loc_tuple_monotonic_above_size_cutoff
459
+ i8data = self.asi8
460
+ converted = ints_to_pydatetime(
461
+ i8data,
462
+ tz=self.tz,
463
+ box="timestamp",
464
+ reso=self._creso,
465
+ )
466
+ return converted
467
+
468
+ elif self.dtype.kind == "m":
469
+ return ints_to_pytimedelta(self._ndarray, box=True)
470
+
471
+ return self._box_values(self.asi8.ravel()).reshape(self.shape)
472
+
473
+ elif isinstance(dtype, ExtensionDtype):
474
+ return super().astype(dtype, copy=copy)
475
+ elif is_string_dtype(dtype):
476
+ return self._format_native_types()
477
+ elif dtype.kind in "iu":
478
+ # we deliberately ignore int32 vs. int64 here.
479
+ # See https://github.com/pandas-dev/pandas/issues/24381 for more.
480
+ values = self.asi8
481
+ if dtype != np.int64:
482
+ raise TypeError(
483
+ f"Converting from {self.dtype} to {dtype} is not supported. "
484
+ "Do obj.astype('int64').astype(dtype) instead"
485
+ )
486
+
487
+ if copy:
488
+ values = values.copy()
489
+ return values
490
+ elif (dtype.kind in "mM" and self.dtype != dtype) or dtype.kind == "f":
491
+ # disallow conversion between datetime/timedelta,
492
+ # and conversions for any datetimelike to float
493
+ msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
494
+ raise TypeError(msg)
495
+ else:
496
+ return np.asarray(self, dtype=dtype)
497
+
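A brief sketch of the integer-cast restriction enforced above, as it surfaces on a DatetimeIndex (the two-step workaround is the one named in the error message):

    import pandas as pd

    dti = pd.date_range("2024-01-01", periods=3, freq="D")
    dti.astype("int64")                  # allowed: the underlying i8 (epoch-ns) values
    # dti.astype("int32")                # raises TypeError, per the message constructed above
    dti.astype("int64").astype("int32")  # the suggested two-step cast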
498
+ @overload
499
+ def view(self) -> Self:
500
+ ...
501
+
502
+ @overload
503
+ def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray:
504
+ ...
505
+
506
+ @overload
507
+ def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray:
508
+ ...
509
+
510
+ @overload
511
+ def view(self, dtype: Dtype | None = ...) -> ArrayLike:
512
+ ...
513
+
514
+ # pylint: disable-next=useless-parent-delegation
515
+ def view(self, dtype: Dtype | None = None) -> ArrayLike:
516
+ # we need to explicitly call super() method as long as the `@overload`s
517
+ # are present in this file.
518
+ return super().view(dtype)
519
+
520
+ # ------------------------------------------------------------------
521
+ # Validation Methods
522
+ # TODO: try to de-duplicate these, ensure identical behavior
523
+
524
+ def _validate_comparison_value(self, other):
525
+ if isinstance(other, str):
526
+ try:
527
+ # GH#18435 strings get a pass from tzawareness compat
528
+ other = self._scalar_from_string(other)
529
+ except (ValueError, IncompatibleFrequency):
530
+ # failed to parse as Timestamp/Timedelta/Period
531
+ raise InvalidComparison(other)
532
+
533
+ if isinstance(other, self._recognized_scalars) or other is NaT:
534
+ other = self._scalar_type(other)
535
+ try:
536
+ self._check_compatible_with(other)
537
+ except (TypeError, IncompatibleFrequency) as err:
538
+ # e.g. tzawareness mismatch
539
+ raise InvalidComparison(other) from err
540
+
541
+ elif not is_list_like(other):
542
+ raise InvalidComparison(other)
543
+
544
+ elif len(other) != len(self):
545
+ raise ValueError("Lengths must match")
546
+
547
+ else:
548
+ try:
549
+ other = self._validate_listlike(other, allow_object=True)
550
+ self._check_compatible_with(other)
551
+ except (TypeError, IncompatibleFrequency) as err:
552
+ if is_object_dtype(getattr(other, "dtype", None)):
553
+ # We will have to operate element-wise
554
+ pass
555
+ else:
556
+ raise InvalidComparison(other) from err
557
+
558
+ return other
559
+
560
+ def _validate_scalar(
561
+ self,
562
+ value,
563
+ *,
564
+ allow_listlike: bool = False,
565
+ unbox: bool = True,
566
+ ):
567
+ """
568
+ Validate that the input value can be cast to our scalar_type.
569
+
570
+ Parameters
571
+ ----------
572
+ value : object
573
+ allow_listlike: bool, default False
574
+ When raising an exception, whether the message should say
575
+ listlike inputs are allowed.
576
+ unbox : bool, default True
577
+ Whether to unbox the result before returning. Note: unbox=False
578
+ skips the setitem compatibility check.
579
+
580
+ Returns
581
+ -------
582
+ self._scalar_type or NaT
583
+ """
584
+ if isinstance(value, self._scalar_type):
585
+ pass
586
+
587
+ elif isinstance(value, str):
588
+ # NB: Careful about tzawareness
589
+ try:
590
+ value = self._scalar_from_string(value)
591
+ except ValueError as err:
592
+ msg = self._validation_error_message(value, allow_listlike)
593
+ raise TypeError(msg) from err
594
+
595
+ elif is_valid_na_for_dtype(value, self.dtype):
596
+ # GH#18295
597
+ value = NaT
598
+
599
+ elif isna(value):
600
+ # if we are dt64tz and value is dt64("NaT"), don't cast to NaT,
601
+ # or else we'll fail to raise in _unbox_scalar
602
+ msg = self._validation_error_message(value, allow_listlike)
603
+ raise TypeError(msg)
604
+
605
+ elif isinstance(value, self._recognized_scalars):
606
+ # error: Argument 1 to "Timestamp" has incompatible type "object"; expected
607
+ # "integer[Any] | float | str | date | datetime | datetime64"
608
+ value = self._scalar_type(value) # type: ignore[arg-type]
609
+
610
+ else:
611
+ msg = self._validation_error_message(value, allow_listlike)
612
+ raise TypeError(msg)
613
+
614
+ if not unbox:
615
+ # NB: In general NDArrayBackedExtensionArray will unbox here;
616
+ # this option exists to prevent a performance hit in
617
+ # TimedeltaIndex.get_loc
618
+ return value
619
+ return self._unbox_scalar(value)
620
+
621
+ def _validation_error_message(self, value, allow_listlike: bool = False) -> str:
622
+ """
623
+ Construct an exception message on validation error.
624
+
625
+ Some methods allow only scalar inputs, while others allow either scalar
626
+ or listlike.
627
+
628
+ Parameters
629
+ ----------
630
+ allow_listlike: bool, default False
631
+
632
+ Returns
633
+ -------
634
+ str
635
+ """
636
+ if hasattr(value, "dtype") and getattr(value, "ndim", 0) > 0:
637
+ msg_got = f"{value.dtype} array"
638
+ else:
639
+ msg_got = f"'{type(value).__name__}'"
640
+ if allow_listlike:
641
+ msg = (
642
+ f"value should be a '{self._scalar_type.__name__}', 'NaT', "
643
+ f"or array of those. Got {msg_got} instead."
644
+ )
645
+ else:
646
+ msg = (
647
+ f"value should be a '{self._scalar_type.__name__}' or 'NaT'. "
648
+ f"Got {msg_got} instead."
649
+ )
650
+ return msg
651
+
652
+ def _validate_listlike(self, value, allow_object: bool = False):
653
+ if isinstance(value, type(self)):
654
+ if self.dtype.kind in "mM" and not allow_object:
655
+ # error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
656
+ value = value.as_unit(self.unit, round_ok=False) # type: ignore[attr-defined]
657
+ return value
658
+
659
+ if isinstance(value, list) and len(value) == 0:
660
+ # We treat empty list as our own dtype.
661
+ return type(self)._from_sequence([], dtype=self.dtype)
662
+
663
+ if hasattr(value, "dtype") and value.dtype == object:
664
+ # `array` below won't do inference if value is an Index or Series.
665
+ # so do so here. in the Index case, inferred_type may be cached.
666
+ if lib.infer_dtype(value) in self._infer_matches:
667
+ try:
668
+ value = type(self)._from_sequence(value)
669
+ except (ValueError, TypeError):
670
+ if allow_object:
671
+ return value
672
+ msg = self._validation_error_message(value, True)
673
+ raise TypeError(msg)
674
+
675
+ # Do type inference if necessary up front (after unpacking
676
+ # NumpyExtensionArray)
677
+ # e.g. we passed PeriodIndex.values and got an ndarray of Periods
678
+ value = extract_array(value, extract_numpy=True)
679
+ value = pd_array(value)
680
+ value = extract_array(value, extract_numpy=True)
681
+
682
+ if is_all_strings(value):
683
+ # We got a StringArray
684
+ try:
685
+ # TODO: Could use from_sequence_of_strings if implemented
686
+ # Note: passing dtype is necessary for PeriodArray tests
687
+ value = type(self)._from_sequence(value, dtype=self.dtype)
688
+ except ValueError:
689
+ pass
690
+
691
+ if isinstance(value.dtype, CategoricalDtype):
692
+ # e.g. we have a Categorical holding self.dtype
693
+ if value.categories.dtype == self.dtype:
694
+ # TODO: do we need equal dtype or just comparable?
695
+ value = value._internal_get_values()
696
+ value = extract_array(value, extract_numpy=True)
697
+
698
+ if allow_object and is_object_dtype(value.dtype):
699
+ pass
700
+
701
+ elif not type(self)._is_recognized_dtype(value.dtype):
702
+ msg = self._validation_error_message(value, True)
703
+ raise TypeError(msg)
704
+
705
+ if self.dtype.kind in "mM" and not allow_object:
706
+ # error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
707
+ value = value.as_unit(self.unit, round_ok=False) # type: ignore[attr-defined]
708
+ return value
709
+
710
+ def _validate_setitem_value(self, value):
711
+ if is_list_like(value):
712
+ value = self._validate_listlike(value)
713
+ else:
714
+ return self._validate_scalar(value, allow_listlike=True)
715
+
716
+ return self._unbox(value)
717
+
718
+ @final
719
+ def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray:
720
+ """
721
+ Unbox either a scalar with _unbox_scalar or an instance of our own type.
722
+ """
723
+ if lib.is_scalar(other):
724
+ other = self._unbox_scalar(other)
725
+ else:
726
+ # same type as self
727
+ self._check_compatible_with(other)
728
+ other = other._ndarray
729
+ return other
730
+
731
+ # ------------------------------------------------------------------
732
+ # Additional array methods
733
+ # These are not part of the EA API, but we implement them because
734
+ # pandas assumes they're there.
735
+
736
+ @ravel_compat
737
+ def map(self, mapper, na_action=None):
738
+ from pandas import Index
739
+
740
+ result = map_array(self, mapper, na_action=na_action)
741
+ result = Index(result)
742
+
743
+ if isinstance(result, ABCMultiIndex):
744
+ return result.to_numpy()
745
+ else:
746
+ return result.array
747
+
748
+ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
749
+ """
750
+ Compute boolean array of whether each value is found in the
751
+ passed set of values.
752
+
753
+ Parameters
754
+ ----------
755
+ values : np.ndarray or ExtensionArray
756
+
757
+ Returns
758
+ -------
759
+ ndarray[bool]
760
+ """
761
+ if values.dtype.kind in "fiuc":
762
+ # TODO: de-duplicate with equals, validate_comparison_value
763
+ return np.zeros(self.shape, dtype=bool)
764
+
765
+ values = ensure_wrapped_if_datetimelike(values)
766
+
767
+ if not isinstance(values, type(self)):
768
+ inferable = [
769
+ "timedelta",
770
+ "timedelta64",
771
+ "datetime",
772
+ "datetime64",
773
+ "date",
774
+ "period",
775
+ ]
776
+ if values.dtype == object:
777
+ values = lib.maybe_convert_objects(
778
+ values, # type: ignore[arg-type]
779
+ convert_non_numeric=True,
780
+ dtype_if_all_nat=self.dtype,
781
+ )
782
+ if values.dtype != object:
783
+ return self.isin(values)
784
+
785
+ inferred = lib.infer_dtype(values, skipna=False)
786
+ if inferred not in inferable:
787
+ if inferred == "string":
788
+ pass
789
+
790
+ elif "mixed" in inferred:
791
+ return isin(self.astype(object), values)
792
+ else:
793
+ return np.zeros(self.shape, dtype=bool)
794
+
795
+ try:
796
+ values = type(self)._from_sequence(values)
797
+ except ValueError:
798
+ return isin(self.astype(object), values)
799
+ else:
800
+ warnings.warn(
801
+ # GH#53111
802
+ f"The behavior of 'isin' with dtype={self.dtype} and "
803
+ "castable values (e.g. strings) is deprecated. In a "
804
+ "future version, these will not be considered matching "
805
+ "by isin. Explicitly cast to the appropriate dtype before "
806
+ "calling isin instead.",
807
+ FutureWarning,
808
+ stacklevel=find_stack_level(),
809
+ )
810
+
811
+ if self.dtype.kind in "mM":
812
+ self = cast("DatetimeArray | TimedeltaArray", self)
813
+ # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
814
+ # has no attribute "as_unit"
815
+ values = values.as_unit(self.unit) # type: ignore[union-attr]
816
+
817
+ try:
818
+ # error: Argument 1 to "_check_compatible_with" of "DatetimeLikeArrayMixin"
819
+ # has incompatible type "ExtensionArray | ndarray[Any, Any]"; expected
820
+ # "Period | Timestamp | Timedelta | NaTType"
821
+ self._check_compatible_with(values) # type: ignore[arg-type]
822
+ except (TypeError, ValueError):
823
+ # Includes tzawareness mismatch and IncompatibleFrequencyError
824
+ return np.zeros(self.shape, dtype=bool)
825
+
826
+ # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
827
+ # has no attribute "asi8"
828
+ return isin(self.asi8, values.asi8) # type: ignore[union-attr]
829
+
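A small sketch of the dispatch above: numeric values short-circuit to all-False, compatible datetime-likes compare on their i8 values, and castable strings still match today but hit the FutureWarning added in this method (assuming a recent pandas):

    import numpy as np
    import pandas as pd

    dti = pd.date_range("2024-01-01", periods=3, freq="D")
    dti.isin(np.array([1, 2]))                # all False: numeric dtypes never match
    dti.isin(pd.to_datetime(["2024-01-02"]))  # [False, True, False] via the i8 comparison
    dti.isin(["2024-01-02"])                  # same matches, plus the GH#53111 FutureWarning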
830
+ # ------------------------------------------------------------------
831
+ # Null Handling
832
+
833
+ def isna(self) -> npt.NDArray[np.bool_]:
834
+ return self._isnan
835
+
836
+ @property # NB: override with cache_readonly in immutable subclasses
837
+ def _isnan(self) -> npt.NDArray[np.bool_]:
838
+ """
839
+ return if each value is nan
840
+ """
841
+ return self.asi8 == iNaT
842
+
843
+ @property # NB: override with cache_readonly in immutable subclasses
844
+ def _hasna(self) -> bool:
845
+ """
846
+ return if I have any nans; enables various perf speedups
847
+ """
848
+ return bool(self._isnan.any())
849
+
850
+ def _maybe_mask_results(
851
+ self, result: np.ndarray, fill_value=iNaT, convert=None
852
+ ) -> np.ndarray:
853
+ """
854
+ Parameters
855
+ ----------
856
+ result : np.ndarray
857
+ fill_value : object, default iNaT
858
+ convert : str, dtype or None
859
+
860
+ Returns
861
+ -------
862
+ result : ndarray with values replaced by the fill_value
863
+
864
+ mask the result if needed, convert to the provided dtype if it's not
865
+ None
866
+
867
+ This is an internal routine.
868
+ """
869
+ if self._hasna:
870
+ if convert:
871
+ result = result.astype(convert)
872
+ if fill_value is None:
873
+ fill_value = np.nan
874
+ np.putmask(result, self._isnan, fill_value)
875
+ return result
876
+
877
+ # ------------------------------------------------------------------
878
+ # Frequency Properties/Methods
879
+
880
+ @property
881
+ def freqstr(self) -> str | None:
882
+ """
883
+ Return the frequency object as a string if it's set, otherwise None.
884
+
885
+ Examples
886
+ --------
887
+ For DatetimeIndex:
888
+
889
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D")
890
+ >>> idx.freqstr
891
+ 'D'
892
+
893
+ The frequency can be inferred if there are more than 2 points:
894
+
895
+ >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"],
896
+ ... freq="infer")
897
+ >>> idx.freqstr
898
+ '2D'
899
+
900
+ For PeriodIndex:
901
+
902
+ >>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M")
903
+ >>> idx.freqstr
904
+ 'M'
905
+ """
906
+ if self.freq is None:
907
+ return None
908
+ return self.freq.freqstr
909
+
910
+ @property # NB: override with cache_readonly in immutable subclasses
911
+ def inferred_freq(self) -> str | None:
912
+ """
913
+ Tries to return a string representing a frequency generated by infer_freq.
914
+
915
+ Returns None if it can't autodetect the frequency.
916
+
917
+ Examples
918
+ --------
919
+ For DatetimeIndex:
920
+
921
+ >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])
922
+ >>> idx.inferred_freq
923
+ '2D'
924
+
925
+ For TimedeltaIndex:
926
+
927
+ >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])
928
+ >>> tdelta_idx
929
+ TimedeltaIndex(['0 days', '10 days', '20 days'],
930
+ dtype='timedelta64[ns]', freq=None)
931
+ >>> tdelta_idx.inferred_freq
932
+ '10D'
933
+ """
934
+ if self.ndim != 1:
935
+ return None
936
+ try:
937
+ return frequencies.infer_freq(self)
938
+ except ValueError:
939
+ return None
940
+
941
+ @property # NB: override with cache_readonly in immutable subclasses
942
+ def _resolution_obj(self) -> Resolution | None:
943
+ freqstr = self.freqstr
944
+ if freqstr is None:
945
+ return None
946
+ try:
947
+ return Resolution.get_reso_from_freqstr(freqstr)
948
+ except KeyError:
949
+ return None
950
+
951
+ @property # NB: override with cache_readonly in immutable subclasses
952
+ def resolution(self) -> str:
953
+ """
954
+ Returns day, hour, minute, second, millisecond or microsecond
955
+ """
956
+ # error: Item "None" of "Optional[Any]" has no attribute "attrname"
957
+ return self._resolution_obj.attrname # type: ignore[union-attr]
958
+
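At the public level this surfaces as, for example (a sketch; the immutable subclasses may override _resolution_obj as the NB comments above note, but the reported value is the same idea):

    import pandas as pd

    pd.date_range("2024-01-01", periods=3, freq="D").resolution    # 'day'
    pd.date_range("2024-01-01", periods=3, freq="min").resolution  # 'minute'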
959
+ # monotonicity/uniqueness properties are called via frequencies.infer_freq,
960
+ # see GH#23789
961
+
962
+ @property
963
+ def _is_monotonic_increasing(self) -> bool:
964
+ return algos.is_monotonic(self.asi8, timelike=True)[0]
965
+
966
+ @property
967
+ def _is_monotonic_decreasing(self) -> bool:
968
+ return algos.is_monotonic(self.asi8, timelike=True)[1]
969
+
970
+ @property
971
+ def _is_unique(self) -> bool:
972
+ return len(unique1d(self.asi8.ravel("K"))) == self.size
973
+
974
+ # ------------------------------------------------------------------
975
+ # Arithmetic Methods
976
+
977
+ def _cmp_method(self, other, op):
978
+ if self.ndim > 1 and getattr(other, "shape", None) == self.shape:
979
+ # TODO: handle 2D-like listlikes
980
+ return op(self.ravel(), other.ravel()).reshape(self.shape)
981
+
982
+ try:
983
+ other = self._validate_comparison_value(other)
984
+ except InvalidComparison:
985
+ return invalid_comparison(self, other, op)
986
+
987
+ dtype = getattr(other, "dtype", None)
988
+ if is_object_dtype(dtype):
989
+ # We have to use comp_method_OBJECT_ARRAY instead of numpy
990
+ # comparison otherwise it would raise when comparing to None
991
+ result = ops.comp_method_OBJECT_ARRAY(
992
+ op, np.asarray(self.astype(object)), other
993
+ )
994
+ return result
995
+ if other is NaT:
996
+ if op is operator.ne:
997
+ result = np.ones(self.shape, dtype=bool)
998
+ else:
999
+ result = np.zeros(self.shape, dtype=bool)
1000
+ return result
1001
+
1002
+ if not isinstance(self.dtype, PeriodDtype):
1003
+ self = cast(TimelikeOps, self)
1004
+ if self._creso != other._creso:
1005
+ if not isinstance(other, type(self)):
1006
+ # i.e. Timedelta/Timestamp, cast to ndarray and let
1007
+ # compare_mismatched_resolutions handle broadcasting
1008
+ try:
1009
+ # GH#52080 see if we can losslessly cast to shared unit
1010
+ other = other.as_unit(self.unit, round_ok=False)
1011
+ except ValueError:
1012
+ other_arr = np.array(other.asm8)
1013
+ return compare_mismatched_resolutions(
1014
+ self._ndarray, other_arr, op
1015
+ )
1016
+ else:
1017
+ other_arr = other._ndarray
1018
+ return compare_mismatched_resolutions(self._ndarray, other_arr, op)
1019
+
1020
+ other_vals = self._unbox(other)
1021
+ # GH#37462 comparison on i8 values is almost 2x faster than M8/m8
1022
+ result = op(self._ndarray.view("i8"), other_vals.view("i8"))
1023
+
1024
+ o_mask = isna(other)
1025
+ mask = self._isnan | o_mask
1026
+ if mask.any():
1027
+ nat_result = op is operator.ne
1028
+ np.putmask(result, mask, nat_result)
1029
+
1030
+ return result
1031
+
1032
+ # pow is invalid for all three subclasses; TimedeltaArray will override
1033
+ # the multiplication and division ops
1034
+ __pow__ = _make_unpacked_invalid_op("__pow__")
1035
+ __rpow__ = _make_unpacked_invalid_op("__rpow__")
1036
+ __mul__ = _make_unpacked_invalid_op("__mul__")
1037
+ __rmul__ = _make_unpacked_invalid_op("__rmul__")
1038
+ __truediv__ = _make_unpacked_invalid_op("__truediv__")
1039
+ __rtruediv__ = _make_unpacked_invalid_op("__rtruediv__")
1040
+ __floordiv__ = _make_unpacked_invalid_op("__floordiv__")
1041
+ __rfloordiv__ = _make_unpacked_invalid_op("__rfloordiv__")
1042
+ __mod__ = _make_unpacked_invalid_op("__mod__")
1043
+ __rmod__ = _make_unpacked_invalid_op("__rmod__")
1044
+ __divmod__ = _make_unpacked_invalid_op("__divmod__")
1045
+ __rdivmod__ = _make_unpacked_invalid_op("__rdivmod__")
1046
+
1047
+ @final
1048
+ def _get_i8_values_and_mask(
1049
+ self, other
1050
+ ) -> tuple[int | npt.NDArray[np.int64], None | npt.NDArray[np.bool_]]:
1051
+ """
1052
+ Get the int64 values and b_mask to pass to add_overflowsafe.
1053
+ """
1054
+ if isinstance(other, Period):
1055
+ i8values = other.ordinal
1056
+ mask = None
1057
+ elif isinstance(other, (Timestamp, Timedelta)):
1058
+ i8values = other._value
1059
+ mask = None
1060
+ else:
1061
+ # PeriodArray, DatetimeArray, TimedeltaArray
1062
+ mask = other._isnan
1063
+ i8values = other.asi8
1064
+ return i8values, mask
1065
+
1066
+ @final
1067
+ def _get_arithmetic_result_freq(self, other) -> BaseOffset | None:
1068
+ """
1069
+ Check if we can preserve self.freq in addition or subtraction.
1070
+ """
1071
+ # Adding or subtracting a Timedelta/Timestamp scalar is freq-preserving
1072
+ # whenever self.freq is a Tick
1073
+ if isinstance(self.dtype, PeriodDtype):
1074
+ return self.freq
1075
+ elif not lib.is_scalar(other):
1076
+ return None
1077
+ elif isinstance(self.freq, Tick):
1078
+ # In these cases
1079
+ return self.freq
1080
+ return None
1081
+
1082
+ @final
1083
+ def _add_datetimelike_scalar(self, other) -> DatetimeArray:
1084
+ if not lib.is_np_dtype(self.dtype, "m"):
1085
+ raise TypeError(
1086
+ f"cannot add {type(self).__name__} and {type(other).__name__}"
1087
+ )
1088
+
1089
+ self = cast("TimedeltaArray", self)
1090
+
1091
+ from pandas.core.arrays import DatetimeArray
1092
+ from pandas.core.arrays.datetimes import tz_to_dtype
1093
+
1094
+ assert other is not NaT
1095
+ if isna(other):
1096
+ # i.e. np.datetime64("NaT")
1097
+ # In this case we specifically interpret NaT as a datetime, not
1098
+ # the timedelta interpretation we would get by returning self + NaT
1099
+ result = self._ndarray + NaT.to_datetime64().astype(f"M8[{self.unit}]")
1100
+ # Preserve our resolution
1101
+ return DatetimeArray._simple_new(result, dtype=result.dtype)
1102
+
1103
+ other = Timestamp(other)
1104
+ self, other = self._ensure_matching_resos(other)
1105
+ self = cast("TimedeltaArray", self)
1106
+
1107
+ other_i8, o_mask = self._get_i8_values_and_mask(other)
1108
+ result = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
1109
+ res_values = result.view(f"M8[{self.unit}]")
1110
+
1111
+ dtype = tz_to_dtype(tz=other.tz, unit=self.unit)
1112
+ res_values = result.view(f"M8[{self.unit}]")
1113
+ new_freq = self._get_arithmetic_result_freq(other)
1114
+ return DatetimeArray._simple_new(res_values, dtype=dtype, freq=new_freq)
1115
+
1116
+ @final
1117
+ def _add_datetime_arraylike(self, other: DatetimeArray) -> DatetimeArray:
1118
+ if not lib.is_np_dtype(self.dtype, "m"):
1119
+ raise TypeError(
1120
+ f"cannot add {type(self).__name__} and {type(other).__name__}"
1121
+ )
1122
+
1123
+ # defer to DatetimeArray.__add__
1124
+ return other + self
1125
+
1126
+ @final
1127
+ def _sub_datetimelike_scalar(
1128
+ self, other: datetime | np.datetime64
1129
+ ) -> TimedeltaArray:
1130
+ if self.dtype.kind != "M":
1131
+ raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
1132
+
1133
+ self = cast("DatetimeArray", self)
1134
+ # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
1135
+
1136
+ if isna(other):
1137
+ # i.e. np.datetime64("NaT")
1138
+ return self - NaT
1139
+
1140
+ ts = Timestamp(other)
1141
+
1142
+ self, ts = self._ensure_matching_resos(ts)
1143
+ return self._sub_datetimelike(ts)
1144
+
1145
+ @final
1146
+ def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray:
1147
+ if self.dtype.kind != "M":
1148
+ raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
1149
+
1150
+ if len(self) != len(other):
1151
+ raise ValueError("cannot add indices of unequal length")
1152
+
1153
+ self = cast("DatetimeArray", self)
1154
+
1155
+ self, other = self._ensure_matching_resos(other)
1156
+ return self._sub_datetimelike(other)
1157
+
1158
+ @final
1159
+ def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray:
1160
+ self = cast("DatetimeArray", self)
1161
+
1162
+ from pandas.core.arrays import TimedeltaArray
1163
+
1164
+ try:
1165
+ self._assert_tzawareness_compat(other)
1166
+ except TypeError as err:
1167
+ new_message = str(err).replace("compare", "subtract")
1168
+ raise type(err)(new_message) from err
1169
+
1170
+ other_i8, o_mask = self._get_i8_values_and_mask(other)
1171
+ res_values = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8"))
1172
+ res_m8 = res_values.view(f"timedelta64[{self.unit}]")
1173
+
1174
+ new_freq = self._get_arithmetic_result_freq(other)
1175
+ new_freq = cast("Tick | None", new_freq)
1176
+ return TimedeltaArray._simple_new(res_m8, dtype=res_m8.dtype, freq=new_freq)
1177
+
1178
+ @final
1179
+ def _add_period(self, other: Period) -> PeriodArray:
1180
+ if not lib.is_np_dtype(self.dtype, "m"):
1181
+ raise TypeError(f"cannot add Period to a {type(self).__name__}")
1182
+
1183
+ # We will wrap in a PeriodArray and defer to the reversed operation
1184
+ from pandas.core.arrays.period import PeriodArray
1185
+
1186
+ i8vals = np.broadcast_to(other.ordinal, self.shape)
1187
+ dtype = PeriodDtype(other.freq)
1188
+ parr = PeriodArray(i8vals, dtype=dtype)
1189
+ return parr + self
1190
+
1191
+ def _add_offset(self, offset):
1192
+ raise AbstractMethodError(self)
1193
+
1194
+ def _add_timedeltalike_scalar(self, other):
1195
+ """
1196
+ Add a delta of a timedeltalike
1197
+
1198
+ Returns
1199
+ -------
1200
+ Same type as self
1201
+ """
1202
+ if isna(other):
1203
+ # i.e. np.timedelta64("NaT")
1204
+ new_values = np.empty(self.shape, dtype="i8").view(self._ndarray.dtype)
1205
+ new_values.fill(iNaT)
1206
+ return type(self)._simple_new(new_values, dtype=self.dtype)
1207
+
1208
+ # PeriodArray overrides, so we only get here with DTA/TDA
1209
+ self = cast("DatetimeArray | TimedeltaArray", self)
1210
+ other = Timedelta(other)
1211
+ self, other = self._ensure_matching_resos(other)
1212
+ return self._add_timedeltalike(other)
1213
+
1214
+ def _add_timedelta_arraylike(self, other: TimedeltaArray):
1215
+ """
1216
+ Add a delta of a TimedeltaIndex
1217
+
1218
+ Returns
1219
+ -------
1220
+ Same type as self
1221
+ """
1222
+ # overridden by PeriodArray
1223
+
1224
+ if len(self) != len(other):
1225
+ raise ValueError("cannot add indices of unequal length")
1226
+
1227
+ self = cast("DatetimeArray | TimedeltaArray", self)
1228
+
1229
+ self, other = self._ensure_matching_resos(other)
1230
+ return self._add_timedeltalike(other)
1231
+
1232
+ @final
1233
+ def _add_timedeltalike(self, other: Timedelta | TimedeltaArray):
1234
+ self = cast("DatetimeArray | TimedeltaArray", self)
1235
+
1236
+ other_i8, o_mask = self._get_i8_values_and_mask(other)
1237
+ new_values = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
1238
+ res_values = new_values.view(self._ndarray.dtype)
1239
+
1240
+ new_freq = self._get_arithmetic_result_freq(other)
1241
+
1242
+ # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
1243
+ # incompatible type "Union[dtype[datetime64], DatetimeTZDtype,
1244
+ # dtype[timedelta64]]"; expected "Union[dtype[datetime64], DatetimeTZDtype]"
1245
+ return type(self)._simple_new(
1246
+ res_values, dtype=self.dtype, freq=new_freq # type: ignore[arg-type]
1247
+ )
1248
+
1249
+ @final
1250
+ def _add_nat(self):
1251
+ """
1252
+ Add pd.NaT to self
1253
+ """
1254
+ if isinstance(self.dtype, PeriodDtype):
1255
+ raise TypeError(
1256
+ f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
1257
+ )
1258
+ self = cast("TimedeltaArray | DatetimeArray", self)
1259
+
1260
+ # GH#19124 pd.NaT is treated like a timedelta for both timedelta
1261
+ # and datetime dtypes
1262
+ result = np.empty(self.shape, dtype=np.int64)
1263
+ result.fill(iNaT)
1264
+ result = result.view(self._ndarray.dtype) # preserve reso
1265
+ # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
1266
+ # incompatible type "Union[dtype[timedelta64], dtype[datetime64],
1267
+ # DatetimeTZDtype]"; expected "Union[dtype[datetime64], DatetimeTZDtype]"
1268
+ return type(self)._simple_new(
1269
+ result, dtype=self.dtype, freq=None # type: ignore[arg-type]
1270
+ )
1271
+
1272
+ @final
1273
+ def _sub_nat(self):
1274
+ """
1275
+ Subtract pd.NaT from self
1276
+ """
1277
+ # GH#19124 Timedelta - datetime is not in general well-defined.
1278
+ # We make an exception for pd.NaT, which in this case quacks
1279
+ # like a timedelta.
1280
+ # For datetime64 dtypes by convention we treat NaT as a datetime, so
1281
+ # this subtraction returns a timedelta64 dtype.
1282
+ # For period dtype, timedelta64 is a close-enough return dtype.
1283
+ result = np.empty(self.shape, dtype=np.int64)
1284
+ result.fill(iNaT)
1285
+ if self.dtype.kind in "mM":
1286
+ # We can retain unit in dtype
1287
+ self = cast("DatetimeArray| TimedeltaArray", self)
1288
+ return result.view(f"timedelta64[{self.unit}]")
1289
+ else:
1290
+ return result.view("timedelta64[ns]")
1291
+
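The NaT conventions described in _add_nat and _sub_nat above look like this at the Index level (a sketch, assuming a recent pandas):

    import pandas as pd

    dti = pd.date_range("2024-01-01", periods=2, freq="D")
    dti + pd.NaT   # DatetimeIndex(['NaT', 'NaT']): NaT quacks like a timedelta here
    dti - pd.NaT   # TimedeltaIndex(['NaT', 'NaT']): per the GH#19124 convention above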
1292
+ @final
1293
+ def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_]:
1294
+ # If the operation is well-defined, we return an object-dtype ndarray
1295
+ # of DateOffsets. Null entries are filled with pd.NaT
1296
+ if not isinstance(self.dtype, PeriodDtype):
1297
+ raise TypeError(
1298
+ f"cannot subtract {type(other).__name__} from {type(self).__name__}"
1299
+ )
1300
+
1301
+ self = cast("PeriodArray", self)
1302
+ self._check_compatible_with(other)
1303
+
1304
+ other_i8, o_mask = self._get_i8_values_and_mask(other)
1305
+ new_i8_data = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8"))
1306
+ new_data = np.array([self.freq.base * x for x in new_i8_data])
1307
+
1308
+ if o_mask is None:
1309
+ # i.e. Period scalar
1310
+ mask = self._isnan
1311
+ else:
1312
+ # i.e. PeriodArray
1313
+ mask = self._isnan | o_mask
1314
+ new_data[mask] = NaT
1315
+ return new_data
1316
+
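A hedged illustration of the object-dtype result described above, for the Period case (exact offset reprs may differ by version):

    import pandas as pd

    pi = pd.period_range("2024-01", periods=3, freq="M")
    pi - pd.Period("2024-01", freq="M")
    # an object-dtype Index of DateOffsets, roughly:
    # [<0 * MonthEnds>, <MonthEnd>, <2 * MonthEnds>]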
1317
+ @final
1318
+ def _addsub_object_array(self, other: npt.NDArray[np.object_], op):
1319
+ """
1320
+ Add or subtract array-like of DateOffset objects
1321
+
1322
+ Parameters
1323
+ ----------
1324
+ other : np.ndarray[object]
1325
+ op : {operator.add, operator.sub}
1326
+
1327
+ Returns
1328
+ -------
1329
+ np.ndarray[object]
1330
+ Except in fastpath case with length 1 where we operate on the
1331
+ contained scalar.
1332
+ """
1333
+ assert op in [operator.add, operator.sub]
1334
+ if len(other) == 1 and self.ndim == 1:
1335
+ # Note: without this special case, we could annotate return type
1336
+ # as ndarray[object]
1337
+ # If both 1D then broadcasting is unambiguous
1338
+ return op(self, other[0])
1339
+
1340
+ warnings.warn(
1341
+ "Adding/subtracting object-dtype array to "
1342
+ f"{type(self).__name__} not vectorized.",
1343
+ PerformanceWarning,
1344
+ stacklevel=find_stack_level(),
1345
+ )
1346
+
1347
+ # Caller is responsible for broadcasting if necessary
1348
+ assert self.shape == other.shape, (self.shape, other.shape)
1349
+
1350
+ res_values = op(self.astype("O"), np.asarray(other))
1351
+ return res_values
1352
+
1353
+ def _accumulate(self, name: str, *, skipna: bool = True, **kwargs) -> Self:
1354
+ if name not in {"cummin", "cummax"}:
1355
+ raise TypeError(f"Accumulation {name} not supported for {type(self)}")
1356
+
1357
+ op = getattr(datetimelike_accumulations, name)
1358
+ result = op(self.copy(), skipna=skipna, **kwargs)
1359
+
1360
+ return type(self)._simple_new(result, dtype=self.dtype)
1361
+
1362
+ @unpack_zerodim_and_defer("__add__")
1363
+ def __add__(self, other):
1364
+ other_dtype = getattr(other, "dtype", None)
1365
+ other = ensure_wrapped_if_datetimelike(other)
1366
+
1367
+ # scalar others
1368
+ if other is NaT:
1369
+ result = self._add_nat()
1370
+ elif isinstance(other, (Tick, timedelta, np.timedelta64)):
1371
+ result = self._add_timedeltalike_scalar(other)
1372
+ elif isinstance(other, BaseOffset):
1373
+ # specifically _not_ a Tick
1374
+ result = self._add_offset(other)
1375
+ elif isinstance(other, (datetime, np.datetime64)):
1376
+ result = self._add_datetimelike_scalar(other)
1377
+ elif isinstance(other, Period) and lib.is_np_dtype(self.dtype, "m"):
1378
+ result = self._add_period(other)
1379
+ elif lib.is_integer(other):
1380
+ # This check must come after the check for np.timedelta64
1381
+ # as is_integer returns True for these
1382
+ if not isinstance(self.dtype, PeriodDtype):
1383
+ raise integer_op_not_supported(self)
1384
+ obj = cast("PeriodArray", self)
1385
+ result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add)
1386
+
1387
+ # array-like others
1388
+ elif lib.is_np_dtype(other_dtype, "m"):
1389
+ # TimedeltaIndex, ndarray[timedelta64]
1390
+ result = self._add_timedelta_arraylike(other)
1391
+ elif is_object_dtype(other_dtype):
1392
+ # e.g. Array/Index of DateOffset objects
1393
+ result = self._addsub_object_array(other, operator.add)
1394
+ elif lib.is_np_dtype(other_dtype, "M") or isinstance(
1395
+ other_dtype, DatetimeTZDtype
1396
+ ):
1397
+ # DatetimeIndex, ndarray[datetime64]
1398
+ return self._add_datetime_arraylike(other)
1399
+ elif is_integer_dtype(other_dtype):
1400
+ if not isinstance(self.dtype, PeriodDtype):
1401
+ raise integer_op_not_supported(self)
1402
+ obj = cast("PeriodArray", self)
1403
+ result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add)
1404
+ else:
1405
+ # Includes Categorical, other ExtensionArrays
1406
+ # For PeriodDtype, if self is a TimedeltaArray and other is a
1407
+ # PeriodArray with a timedelta-like (i.e. Tick) freq, this
1408
+ # operation is valid. Defer to the PeriodArray implementation.
1409
+ # In remaining cases, this will end up raising TypeError.
1410
+ return NotImplemented
1411
+
1412
+ if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"):
1413
+ from pandas.core.arrays import TimedeltaArray
1414
+
1415
+ return TimedeltaArray._from_sequence(result)
1416
+ return result
1417
+
1418
+ def __radd__(self, other):
1419
+ # alias for __add__
1420
+ return self.__add__(other)
1421
+
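A few of the __add__ dispatch branches above, illustrated with public objects (a sketch, not exhaustive):

    import pandas as pd

    dti = pd.date_range("2024-01-01", periods=3, freq="D")
    tdi = pd.timedelta_range("1 day", periods=3, freq="D")

    dti + pd.Timedelta("1h")          # timedelta-like scalar: datetime64 result, Tick freq preserved
    tdi + pd.Timestamp("2024-01-01")  # datetime-like scalar on timedeltas -> DatetimeIndex
    dti + pd.DateOffset(months=1)     # non-Tick offset: routed through _add_offset
    # dti + 1  -> TypeError: integer addition is only supported for PeriodDtype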
1422
+ @unpack_zerodim_and_defer("__sub__")
1423
+ def __sub__(self, other):
1424
+ other_dtype = getattr(other, "dtype", None)
1425
+ other = ensure_wrapped_if_datetimelike(other)
1426
+
1427
+ # scalar others
1428
+ if other is NaT:
1429
+ result = self._sub_nat()
1430
+ elif isinstance(other, (Tick, timedelta, np.timedelta64)):
1431
+ result = self._add_timedeltalike_scalar(-other)
1432
+ elif isinstance(other, BaseOffset):
1433
+ # specifically _not_ a Tick
1434
+ result = self._add_offset(-other)
1435
+ elif isinstance(other, (datetime, np.datetime64)):
1436
+ result = self._sub_datetimelike_scalar(other)
1437
+ elif lib.is_integer(other):
1438
+ # This check must come after the check for np.timedelta64
1439
+ # as is_integer returns True for these
1440
+ if not isinstance(self.dtype, PeriodDtype):
1441
+ raise integer_op_not_supported(self)
1442
+ obj = cast("PeriodArray", self)
1443
+ result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub)
1444
+
1445
+ elif isinstance(other, Period):
1446
+ result = self._sub_periodlike(other)
1447
+
1448
+ # array-like others
1449
+ elif lib.is_np_dtype(other_dtype, "m"):
1450
+ # TimedeltaIndex, ndarray[timedelta64]
1451
+ result = self._add_timedelta_arraylike(-other)
1452
+ elif is_object_dtype(other_dtype):
1453
+ # e.g. Array/Index of DateOffset objects
1454
+ result = self._addsub_object_array(other, operator.sub)
1455
+ elif lib.is_np_dtype(other_dtype, "M") or isinstance(
1456
+ other_dtype, DatetimeTZDtype
1457
+ ):
1458
+ # DatetimeIndex, ndarray[datetime64]
1459
+ result = self._sub_datetime_arraylike(other)
1460
+ elif isinstance(other_dtype, PeriodDtype):
1461
+ # PeriodIndex
1462
+ result = self._sub_periodlike(other)
1463
+ elif is_integer_dtype(other_dtype):
1464
+ if not isinstance(self.dtype, PeriodDtype):
1465
+ raise integer_op_not_supported(self)
1466
+ obj = cast("PeriodArray", self)
1467
+ result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub)
1468
+ else:
1469
+ # Includes ExtensionArrays, float_dtype
1470
+ return NotImplemented
1471
+
1472
+ if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"):
1473
+ from pandas.core.arrays import TimedeltaArray
1474
+
1475
+ return TimedeltaArray._from_sequence(result)
1476
+ return result
1477
+
1478
+ def __rsub__(self, other):
1479
+ other_dtype = getattr(other, "dtype", None)
1480
+ other_is_dt64 = lib.is_np_dtype(other_dtype, "M") or isinstance(
1481
+ other_dtype, DatetimeTZDtype
1482
+ )
1483
+
1484
+ if other_is_dt64 and lib.is_np_dtype(self.dtype, "m"):
1485
+ # ndarray[datetime64] cannot be subtracted from self, so
1486
+ # we need to wrap in DatetimeArray/Index and flip the operation
1487
+ if lib.is_scalar(other):
1488
+ # i.e. np.datetime64 object
1489
+ return Timestamp(other) - self
1490
+ if not isinstance(other, DatetimeLikeArrayMixin):
1491
+ # Avoid down-casting DatetimeIndex
1492
+ from pandas.core.arrays import DatetimeArray
1493
+
1494
+ other = DatetimeArray._from_sequence(other)
1495
+ return other - self
1496
+ elif self.dtype.kind == "M" and hasattr(other, "dtype") and not other_is_dt64:
1497
+ # GH#19959 datetime - datetime is well-defined as timedelta,
1498
+ # but any other type - datetime is not well-defined.
1499
+ raise TypeError(
1500
+ f"cannot subtract {type(self).__name__} from {type(other).__name__}"
1501
+ )
1502
+ elif isinstance(self.dtype, PeriodDtype) and lib.is_np_dtype(other_dtype, "m"):
1503
+ # TODO: Can we simplify/generalize these cases at all?
1504
+ raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
1505
+ elif lib.is_np_dtype(self.dtype, "m"):
1506
+ self = cast("TimedeltaArray", self)
1507
+ return (-self) + other
1508
+
1509
+ # We get here with e.g. datetime objects
1510
+ return -(self - other)
1511
+
1512
+ def __iadd__(self, other) -> Self:
1513
+ result = self + other
1514
+ self[:] = result[:]
1515
+
1516
+ if not isinstance(self.dtype, PeriodDtype):
1517
+ # restore freq, which is invalidated by setitem
1518
+ self._freq = result.freq
1519
+ return self
1520
+
1521
+ def __isub__(self, other) -> Self:
1522
+ result = self - other
1523
+ self[:] = result[:]
1524
+
1525
+ if not isinstance(self.dtype, PeriodDtype):
1526
+ # restore freq, which is invalidated by setitem
1527
+ self._freq = result.freq
1528
+ return self
1529
+
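The `__add__`/`__sub__` dispatch above is what backs datetime-like arithmetic at the user level. A minimal sketch of the observable behavior, assuming a standard pandas >= 2.0 install:

import pandas as pd

dti = pd.date_range("2024-01-01", periods=3, freq="D")

# timedelta-like scalar: routed through _add_timedeltalike_scalar
print(dti + pd.Timedelta(hours=12))

# datetime64 - datetime64: the raw timedelta64 result is wrapped in a TimedeltaArray/Index
print(dti - pd.Timestamp("2024-01-01"))

# integer addition is only defined for PeriodDtype; it shifts by n * freq
pi = pd.period_range("2024-01", periods=3, freq="M")
print(pi + 1)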
1530
+ # --------------------------------------------------------------
1531
+ # Reductions
1532
+
1533
+ @_period_dispatch
1534
+ def _quantile(
1535
+ self,
1536
+ qs: npt.NDArray[np.float64],
1537
+ interpolation: str,
1538
+ ) -> Self:
1539
+ return super()._quantile(qs=qs, interpolation=interpolation)
1540
+
1541
+ @_period_dispatch
1542
+ def min(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
1543
+ """
1544
+ Return the minimum value of the Array or minimum along
1545
+ an axis.
1546
+
1547
+ See Also
1548
+ --------
1549
+ numpy.ndarray.min
1550
+ Index.min : Return the minimum value in an Index.
1551
+ Series.min : Return the minimum value in a Series.
1552
+ """
1553
+ nv.validate_min((), kwargs)
1554
+ nv.validate_minmax_axis(axis, self.ndim)
1555
+
1556
+ result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
1557
+ return self._wrap_reduction_result(axis, result)
1558
+
1559
+ @_period_dispatch
1560
+ def max(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
1561
+ """
1562
+ Return the maximum value of the Array or maximum along
1563
+ an axis.
1564
+
1565
+ See Also
1566
+ --------
1567
+ numpy.ndarray.max
1568
+ Index.max : Return the maximum value in an Index.
1569
+ Series.max : Return the maximum value in a Series.
1570
+ """
1571
+ nv.validate_max((), kwargs)
1572
+ nv.validate_minmax_axis(axis, self.ndim)
1573
+
1574
+ result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
1575
+ return self._wrap_reduction_result(axis, result)
1576
+
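A small usage sketch of these reductions at the Index level (assuming the default skipna=True unless noted):

import pandas as pd

dti = pd.DatetimeIndex(["2020-01-03", pd.NaT, "2020-01-01"])

# NaT entries are skipped by default
print(dti.min())               # Timestamp('2020-01-01 00:00:00')
print(dti.max())               # Timestamp('2020-01-03 00:00:00')

# with skipna=False a single NaT propagates to the result
print(dti.min(skipna=False))   # NaT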
1577
+ def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0):
1578
+ """
1579
+ Return the mean value of the Array.
1580
+
1581
+ Parameters
1582
+ ----------
1583
+ skipna : bool, default True
1584
+ Whether to ignore any NaT elements.
1585
+ axis : int, optional, default 0
1586
+
1587
+ Returns
1588
+ -------
1589
+ scalar
1590
+ Timestamp or Timedelta.
1591
+
1592
+ See Also
1593
+ --------
1594
+ numpy.ndarray.mean : Returns the average of array elements along a given axis.
1595
+ Series.mean : Return the mean value in a Series.
1596
+
1597
+ Notes
1598
+ -----
1599
+ mean is only defined for Datetime and Timedelta dtypes, not for Period.
1600
+
1601
+ Examples
1602
+ --------
1603
+ For :class:`pandas.DatetimeIndex`:
1604
+
1605
+ >>> idx = pd.date_range('2001-01-01 00:00', periods=3)
1606
+ >>> idx
1607
+ DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
1608
+ dtype='datetime64[ns]', freq='D')
1609
+ >>> idx.mean()
1610
+ Timestamp('2001-01-02 00:00:00')
1611
+
1612
+ For :class:`pandas.TimedeltaIndex`:
1613
+
1614
+ >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D')
1615
+ >>> tdelta_idx
1616
+ TimedeltaIndex(['1 days', '2 days', '3 days'],
1617
+ dtype='timedelta64[ns]', freq=None)
1618
+ >>> tdelta_idx.mean()
1619
+ Timedelta('2 days 00:00:00')
1620
+ """
1621
+ if isinstance(self.dtype, PeriodDtype):
1622
+ # See discussion in GH#24757
1623
+ raise TypeError(
1624
+ f"mean is not implemented for {type(self).__name__} since the "
1625
+ "meaning is ambiguous. An alternative is "
1626
+ "obj.to_timestamp(how='start').mean()"
1627
+ )
1628
+
1629
+ result = nanops.nanmean(
1630
+ self._ndarray, axis=axis, skipna=skipna, mask=self.isna()
1631
+ )
1632
+ return self._wrap_reduction_result(axis, result)
1633
+
1634
+ @_period_dispatch
1635
+ def median(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
1636
+ nv.validate_median((), kwargs)
1637
+
1638
+ if axis is not None and abs(axis) >= self.ndim:
1639
+ raise ValueError("abs(axis) must be less than ndim")
1640
+
1641
+ result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
1642
+ return self._wrap_reduction_result(axis, result)
1643
+
1644
+ def _mode(self, dropna: bool = True):
1645
+ mask = None
1646
+ if dropna:
1647
+ mask = self.isna()
1648
+
1649
+ i8modes = algorithms.mode(self.view("i8"), mask=mask)
1650
+ npmodes = i8modes.view(self._ndarray.dtype)
1651
+ npmodes = cast(np.ndarray, npmodes)
1652
+ return self._from_backing_data(npmodes)
1653
+
1654
+ # ------------------------------------------------------------------
1655
+ # GroupBy Methods
1656
+
1657
+ def _groupby_op(
1658
+ self,
1659
+ *,
1660
+ how: str,
1661
+ has_dropped_na: bool,
1662
+ min_count: int,
1663
+ ngroups: int,
1664
+ ids: npt.NDArray[np.intp],
1665
+ **kwargs,
1666
+ ):
1667
+ dtype = self.dtype
1668
+ if dtype.kind == "M":
1669
+ # Adding/multiplying datetimes is not valid
1670
+ if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
1671
+ raise TypeError(f"datetime64 type does not support {how} operations")
1672
+ if how in ["any", "all"]:
1673
+ # GH#34479
1674
+ warnings.warn(
1675
+ f"'{how}' with datetime64 dtypes is deprecated and will raise in a "
1676
+ f"future version. Use (obj != pd.Timestamp(0)).{how}() instead.",
1677
+ FutureWarning,
1678
+ stacklevel=find_stack_level(),
1679
+ )
1680
+
1681
+ elif isinstance(dtype, PeriodDtype):
1682
+ # Adding/multiplying Periods is not valid
1683
+ if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
1684
+ raise TypeError(f"Period type does not support {how} operations")
1685
+ if how in ["any", "all"]:
1686
+ # GH#34479
1687
+ warnings.warn(
1688
+ f"'{how}' with PeriodDtype is deprecated and will raise in a "
1689
+ f"future version. Use (obj != pd.Period(0, freq)).{how}() instead.",
1690
+ FutureWarning,
1691
+ stacklevel=find_stack_level(),
1692
+ )
1693
+ else:
1694
+ # timedeltas we can add but not multiply
1695
+ if how in ["prod", "cumprod", "skew", "var"]:
1696
+ raise TypeError(f"timedelta64 type does not support {how} operations")
1697
+
1698
+ # All of the functions implemented here are ordinal, so we can
1699
+ # operate on the tz-naive equivalents
1700
+ npvalues = self._ndarray.view("M8[ns]")
1701
+
1702
+ from pandas.core.groupby.ops import WrappedCythonOp
1703
+
1704
+ kind = WrappedCythonOp.get_kind_from_how(how)
1705
+ op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
1706
+
1707
+ res_values = op._cython_op_ndim_compat(
1708
+ npvalues,
1709
+ min_count=min_count,
1710
+ ngroups=ngroups,
1711
+ comp_ids=ids,
1712
+ mask=None,
1713
+ **kwargs,
1714
+ )
1715
+
1716
+ if op.how in op.cast_blocklist:
1717
+ # i.e. how in ["rank"], since other cast_blocklist methods don't go
1718
+ # through cython_operation
1719
+ return res_values
1720
+
1721
+ # We did a view to M8[ns] above, now we go the other direction
1722
+ assert res_values.dtype == "M8[ns]"
1723
+ if how in ["std", "sem"]:
1724
+ from pandas.core.arrays import TimedeltaArray
1725
+
1726
+ if isinstance(self.dtype, PeriodDtype):
1727
+ raise TypeError("'std' and 'sem' are not valid for PeriodDtype")
1728
+ self = cast("DatetimeArray | TimedeltaArray", self)
1729
+ new_dtype = f"m8[{self.unit}]"
1730
+ res_values = res_values.view(new_dtype)
1731
+ return TimedeltaArray._simple_new(res_values, dtype=res_values.dtype)
1732
+
1733
+ res_values = res_values.view(self._ndarray.dtype)
1734
+ return self._from_backing_data(res_values)
1735
+
1736
+
1737
+ class DatelikeOps(DatetimeLikeArrayMixin):
1738
+ """
1739
+ Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
1740
+ """
1741
+
1742
+ @Substitution(
1743
+ URL="https://docs.python.org/3/library/datetime.html"
1744
+ "#strftime-and-strptime-behavior"
1745
+ )
1746
+ def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
1747
+ """
1748
+ Convert to Index using specified date_format.
1749
+
1750
+ Return an Index of formatted strings specified by date_format, which
1751
+ supports the same string format as the python standard library. Details
1752
+ of the string format can be found in `python string format
1753
+ doc <%(URL)s>`__.
1754
+
1755
+ Formats supported by the C `strftime` API but not by the python string format
1756
+ doc (such as `"%%R"`, `"%%r"`) are not officially supported and should
1757
+ preferably be replaced with their supported equivalents (such as `"%%H:%%M"`,
1758
+ `"%%I:%%M:%%S %%p"`).
1759
+
1760
+ Note that `PeriodIndex` supports additional directives, detailed in
1761
+ `Period.strftime`.
1762
+
1763
+ Parameters
1764
+ ----------
1765
+ date_format : str
1766
+ Date format string (e.g. "%%Y-%%m-%%d").
1767
+
1768
+ Returns
1769
+ -------
1770
+ ndarray[object]
1771
+ NumPy ndarray of formatted strings.
1772
+
1773
+ See Also
1774
+ --------
1775
+ to_datetime : Convert the given argument to datetime.
1776
+ DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
1777
+ DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
1778
+ DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
1779
+ Timestamp.strftime : Format a single Timestamp.
1780
+ Period.strftime : Format a single Period.
1781
+
1782
+ Examples
1783
+ --------
1784
+ >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
1785
+ ... periods=3, freq='s')
1786
+ >>> rng.strftime('%%B %%d, %%Y, %%r')
1787
+ Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
1788
+ 'March 10, 2018, 09:00:02 AM'],
1789
+ dtype='object')
1790
+ """
1791
+ result = self._format_native_types(date_format=date_format, na_rep=np.nan)
1792
+ return result.astype(object, copy=False)
1793
+
1794
+
1795
+ _round_doc = """
1796
+ Perform {op} operation on the data to the specified `freq`.
1797
+
1798
+ Parameters
1799
+ ----------
1800
+ freq : str or Offset
1801
+ The frequency level to {op} the index to. Must be a fixed
1802
+ frequency like 'S' (second) not 'ME' (month end). See
1803
+ :ref:`frequency aliases <timeseries.offset_aliases>` for
1804
+ a list of possible `freq` values.
1805
+ ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
1806
+ Only relevant for DatetimeIndex:
1807
+
1808
+ - 'infer' will attempt to infer fall dst-transition hours based on
1809
+ order
1810
+ - bool-ndarray where True signifies a DST time, False designates
1811
+ a non-DST time (note that this flag is only applicable for
1812
+ ambiguous times)
1813
+ - 'NaT' will return NaT where there are ambiguous times
1814
+ - 'raise' will raise an AmbiguousTimeError if there are ambiguous
1815
+ times.
1816
+
1817
+ nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
1818
+ A nonexistent time does not exist in a particular timezone
1819
+ where clocks moved forward due to DST.
1820
+
1821
+ - 'shift_forward' will shift the nonexistent time forward to the
1822
+ closest existing time
1823
+ - 'shift_backward' will shift the nonexistent time backward to the
1824
+ closest existing time
1825
+ - 'NaT' will return NaT where there are nonexistent times
1826
+ - timedelta objects will shift nonexistent times by the timedelta
1827
+ - 'raise' will raise a NonExistentTimeError if there are
1828
+ nonexistent times.
1829
+
1830
+ Returns
1831
+ -------
1832
+ DatetimeIndex, TimedeltaIndex, or Series
1833
+ Index of the same type for a DatetimeIndex or TimedeltaIndex,
1834
+ or a Series with the same index for a Series.
1835
+
1836
+ Raises
1837
+ ------
1838
+ ValueError if the `freq` cannot be converted.
1839
+
1840
+ Notes
1841
+ -----
1842
+ If the timestamps have a timezone, {op}ing will take place relative to the
1843
+ local ("wall") time and re-localized to the same timezone. When {op}ing
1844
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
1845
+ control the re-localization behavior.
1846
+
1847
+ Examples
1848
+ --------
1849
+ **DatetimeIndex**
1850
+
1851
+ >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
1852
+ >>> rng
1853
+ DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
1854
+ '2018-01-01 12:01:00'],
1855
+ dtype='datetime64[ns]', freq='min')
1856
+ """
1857
+
1858
+ _round_example = """>>> rng.round('h')
1859
+ DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
1860
+ '2018-01-01 12:00:00'],
1861
+ dtype='datetime64[ns]', freq=None)
1862
+
1863
+ **Series**
1864
+
1865
+ >>> pd.Series(rng).dt.round("h")
1866
+ 0 2018-01-01 12:00:00
1867
+ 1 2018-01-01 12:00:00
1868
+ 2 2018-01-01 12:00:00
1869
+ dtype: datetime64[ns]
1870
+
1871
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
1872
+ ``nonexistent`` to control how the timestamp should be re-localized.
1873
+
1874
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")
1875
+
1876
+ >>> rng_tz.floor("2h", ambiguous=False)
1877
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
1878
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1879
+
1880
+ >>> rng_tz.floor("2h", ambiguous=True)
1881
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
1882
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1883
+ """
1884
+
1885
+ _floor_example = """>>> rng.floor('h')
1886
+ DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
1887
+ '2018-01-01 12:00:00'],
1888
+ dtype='datetime64[ns]', freq=None)
1889
+
1890
+ **Series**
1891
+
1892
+ >>> pd.Series(rng).dt.floor("h")
1893
+ 0 2018-01-01 11:00:00
1894
+ 1 2018-01-01 12:00:00
1895
+ 2 2018-01-01 12:00:00
1896
+ dtype: datetime64[ns]
1897
+
1898
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
1899
+ ``nonexistent`` to control how the timestamp should be re-localized.
1900
+
1901
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")
1902
+
1903
+ >>> rng_tz.floor("2h", ambiguous=False)
1904
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
1905
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1906
+
1907
+ >>> rng_tz.floor("2h", ambiguous=True)
1908
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
1909
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1910
+ """
1911
+
1912
+ _ceil_example = """>>> rng.ceil('h')
1913
+ DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
1914
+ '2018-01-01 13:00:00'],
1915
+ dtype='datetime64[ns]', freq=None)
1916
+
1917
+ **Series**
1918
+
1919
+ >>> pd.Series(rng).dt.ceil("h")
1920
+ 0 2018-01-01 12:00:00
1921
+ 1 2018-01-01 12:00:00
1922
+ 2 2018-01-01 13:00:00
1923
+ dtype: datetime64[ns]
1924
+
1925
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
1926
+ ``nonexistent`` to control how the timestamp should be re-localized.
1927
+
1928
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam")
1929
+
1930
+ >>> rng_tz.ceil("h", ambiguous=False)
1931
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
1932
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1933
+
1934
+ >>> rng_tz.ceil("h", ambiguous=True)
1935
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
1936
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
1937
+ """
1938
+
1939
+
1940
+ class TimelikeOps(DatetimeLikeArrayMixin):
1941
+ """
1942
+ Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
1943
+ """
1944
+
1945
+ _default_dtype: np.dtype
1946
+
1947
+ def __init__(
1948
+ self, values, dtype=None, freq=lib.no_default, copy: bool = False
1949
+ ) -> None:
1950
+ warnings.warn(
1951
+ # GH#55623
1952
+ f"{type(self).__name__}.__init__ is deprecated and will be "
1953
+ "removed in a future version. Use pd.array instead.",
1954
+ FutureWarning,
1955
+ stacklevel=find_stack_level(),
1956
+ )
1957
+ if dtype is not None:
1958
+ dtype = pandas_dtype(dtype)
1959
+
1960
+ values = extract_array(values, extract_numpy=True)
1961
+ if isinstance(values, IntegerArray):
1962
+ values = values.to_numpy("int64", na_value=iNaT)
1963
+
1964
+ inferred_freq = getattr(values, "_freq", None)
1965
+ explicit_none = freq is None
1966
+ freq = freq if freq is not lib.no_default else None
1967
+
1968
+ if isinstance(values, type(self)):
1969
+ if explicit_none:
1970
+ # don't inherit from values
1971
+ pass
1972
+ elif freq is None:
1973
+ freq = values.freq
1974
+ elif freq and values.freq:
1975
+ freq = to_offset(freq)
1976
+ freq = _validate_inferred_freq(freq, values.freq)
1977
+
1978
+ if dtype is not None and dtype != values.dtype:
1979
+ # TODO: we only have tests for this for DTA, not TDA (2022-07-01)
1980
+ raise TypeError(
1981
+ f"dtype={dtype} does not match data dtype {values.dtype}"
1982
+ )
1983
+
1984
+ dtype = values.dtype
1985
+ values = values._ndarray
1986
+
1987
+ elif dtype is None:
1988
+ if isinstance(values, np.ndarray) and values.dtype.kind in "Mm":
1989
+ dtype = values.dtype
1990
+ else:
1991
+ dtype = self._default_dtype
1992
+ if isinstance(values, np.ndarray) and values.dtype == "i8":
1993
+ values = values.view(dtype)
1994
+
1995
+ if not isinstance(values, np.ndarray):
1996
+ raise ValueError(
1997
+ f"Unexpected type '{type(values).__name__}'. 'values' must be a "
1998
+ f"{type(self).__name__}, ndarray, or Series or Index "
1999
+ "containing one of those."
2000
+ )
2001
+ if values.ndim not in [1, 2]:
2002
+ raise ValueError("Only 1-dimensional input arrays are supported.")
2003
+
2004
+ if values.dtype == "i8":
2005
+ # for compat with datetime/timedelta/period shared methods,
2006
+ # we can sometimes get here with int64 values. These represent
2007
+ # nanosecond UTC (or tz-naive) unix timestamps
2008
+ if dtype is None:
2009
+ dtype = self._default_dtype
2010
+ values = values.view(self._default_dtype)
2011
+ elif lib.is_np_dtype(dtype, "mM"):
2012
+ values = values.view(dtype)
2013
+ elif isinstance(dtype, DatetimeTZDtype):
2014
+ kind = self._default_dtype.kind
2015
+ new_dtype = f"{kind}8[{dtype.unit}]"
2016
+ values = values.view(new_dtype)
2017
+
2018
+ dtype = self._validate_dtype(values, dtype)
2019
+
2020
+ if freq == "infer":
2021
+ raise ValueError(
2022
+ f"Frequency inference not allowed in {type(self).__name__}.__init__. "
2023
+ "Use 'pd.array()' instead."
2024
+ )
2025
+
2026
+ if copy:
2027
+ values = values.copy()
2028
+ if freq:
2029
+ freq = to_offset(freq)
2030
+ if values.dtype.kind == "m" and not isinstance(freq, Tick):
2031
+ raise TypeError("TimedeltaArray/Index freq must be a Tick")
2032
+
2033
+ NDArrayBacked.__init__(self, values=values, dtype=dtype)
2034
+ self._freq = freq
2035
+
2036
+ if inferred_freq is None and freq is not None:
2037
+ type(self)._validate_frequency(self, freq)
2038
+
2039
+ @classmethod
2040
+ def _validate_dtype(cls, values, dtype):
2041
+ raise AbstractMethodError(cls)
2042
+
2043
+ @property
2044
+ def freq(self):
2045
+ """
2046
+ Return the frequency object if it is set, otherwise None.
2047
+ """
2048
+ return self._freq
2049
+
2050
+ @freq.setter
2051
+ def freq(self, value) -> None:
2052
+ if value is not None:
2053
+ value = to_offset(value)
2054
+ self._validate_frequency(self, value)
2055
+ if self.dtype.kind == "m" and not isinstance(value, Tick):
2056
+ raise TypeError("TimedeltaArray/Index freq must be a Tick")
2057
+
2058
+ if self.ndim > 1:
2059
+ raise ValueError("Cannot set freq with ndim > 1")
2060
+
2061
+ self._freq = value
2062
+
2063
+ @final
2064
+ def _maybe_pin_freq(self, freq, validate_kwds: dict):
2065
+ """
2066
+ Constructor helper to pin the appropriate `freq` attribute. Assumes
2067
+ that self._freq is currently set to any freq inferred in
2068
+ _from_sequence_not_strict.
2069
+ """
2070
+ if freq is None:
2071
+ # user explicitly passed None -> override any inferred_freq
2072
+ self._freq = None
2073
+ elif freq == "infer":
2074
+ # if self._freq is *not* None then we already inferred a freq
2075
+ # and there is nothing left to do
2076
+ if self._freq is None:
2077
+ # Set _freq directly to bypass duplicative _validate_frequency
2078
+ # check.
2079
+ self._freq = to_offset(self.inferred_freq)
2080
+ elif freq is lib.no_default:
2081
+ # user did not specify anything, keep inferred freq if the original
2082
+ # data had one, otherwise do nothing
2083
+ pass
2084
+ elif self._freq is None:
2085
+ # We cannot inherit a freq from the data, so we need to validate
2086
+ # the user-passed freq
2087
+ freq = to_offset(freq)
2088
+ type(self)._validate_frequency(self, freq, **validate_kwds)
2089
+ self._freq = freq
2090
+ else:
2091
+ # Otherwise we just need to check that the user-passed freq
2092
+ # doesn't conflict with the one we already have.
2093
+ freq = to_offset(freq)
2094
+ _validate_inferred_freq(freq, self._freq)
2095
+
2096
+ @final
2097
+ @classmethod
2098
+ def _validate_frequency(cls, index, freq: BaseOffset, **kwargs):
2099
+ """
2100
+ Validate that a frequency is compatible with the values of a given
2101
+ Datetime Array/Index or Timedelta Array/Index
2102
+
2103
+ Parameters
2104
+ ----------
2105
+ index : DatetimeIndex or TimedeltaIndex
2106
+ The index on which to determine if the given frequency is valid
2107
+ freq : DateOffset
2108
+ The frequency to validate
2109
+ """
2110
+ inferred = index.inferred_freq
2111
+ if index.size == 0 or inferred == freq.freqstr:
2112
+ return None
2113
+
2114
+ try:
2115
+ on_freq = cls._generate_range(
2116
+ start=index[0],
2117
+ end=None,
2118
+ periods=len(index),
2119
+ freq=freq,
2120
+ unit=index.unit,
2121
+ **kwargs,
2122
+ )
2123
+ if not np.array_equal(index.asi8, on_freq.asi8):
2124
+ raise ValueError
2125
+ except ValueError as err:
2126
+ if "non-fixed" in str(err):
2127
+ # non-fixed frequencies are not meaningful for timedelta64;
2128
+ # we retain that error message
2129
+ raise err
2130
+ # GH#11587 the main way this is reached is if the `np.array_equal`
2131
+ # check above is False. This can also be reached if index[0]
2132
+ # is `NaT`, in which case the call to `cls._generate_range` will
2133
+ # raise a ValueError, which we re-raise with a more targeted
2134
+ # message.
2135
+ raise ValueError(
2136
+ f"Inferred frequency {inferred} from passed values "
2137
+ f"does not conform to passed frequency {freq.freqstr}"
2138
+ ) from err
2139
+
2140
+ @classmethod
2141
+ def _generate_range(
2142
+ cls, start, end, periods: int | None, freq, *args, **kwargs
2143
+ ) -> Self:
2144
+ raise AbstractMethodError(cls)
2145
+
2146
+ # --------------------------------------------------------------
2147
+
2148
+ @cache_readonly
2149
+ def _creso(self) -> int:
2150
+ return get_unit_from_dtype(self._ndarray.dtype)
2151
+
2152
+ @cache_readonly
2153
+ def unit(self) -> str:
2154
+ # e.g. "ns", "us", "ms"
2155
+ # error: Argument 1 to "dtype_to_unit" has incompatible type
2156
+ # "ExtensionDtype"; expected "Union[DatetimeTZDtype, dtype[Any]]"
2157
+ return dtype_to_unit(self.dtype) # type: ignore[arg-type]
2158
+
2159
+ def as_unit(self, unit: str, round_ok: bool = True) -> Self:
2160
+ if unit not in ["s", "ms", "us", "ns"]:
2161
+ raise ValueError("Supported units are 's', 'ms', 'us', 'ns'")
2162
+
2163
+ dtype = np.dtype(f"{self.dtype.kind}8[{unit}]")
2164
+ new_values = astype_overflowsafe(self._ndarray, dtype, round_ok=round_ok)
2165
+
2166
+ if isinstance(self.dtype, np.dtype):
2167
+ new_dtype = new_values.dtype
2168
+ else:
2169
+ tz = cast("DatetimeArray", self).tz
2170
+ new_dtype = DatetimeTZDtype(tz=tz, unit=unit)
2171
+
2172
+ # error: Unexpected keyword argument "freq" for "_simple_new" of
2173
+ # "NDArrayBacked" [call-arg]
2174
+ return type(self)._simple_new(
2175
+ new_values, dtype=new_dtype, freq=self.freq # type: ignore[call-arg]
2176
+ )
2177
+
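`as_unit` recasts the underlying values to a new resolution (via astype_overflowsafe) while carrying over tz and freq. A usage sketch, assuming pandas >= 2.0 where non-nanosecond units are supported:

import pandas as pd

dti = pd.date_range("2024-01-01", periods=2, freq="D", tz="UTC")
print(dti.dtype)     # datetime64[ns, UTC]

sec = dti.as_unit("s")
print(sec.dtype)     # datetime64[s, UTC]
print(sec.freq)      # the original freq is carried over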
2178
+ # TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta
2179
+ # with the return type matching input type. TypeVar?
2180
+ def _ensure_matching_resos(self, other):
2181
+ if self._creso != other._creso:
2182
+ # Just as with Timestamp/Timedelta, we cast to the higher resolution
2183
+ if self._creso < other._creso:
2184
+ self = self.as_unit(other.unit)
2185
+ else:
2186
+ other = other.as_unit(self.unit)
2187
+ return self, other
2188
+
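When the two operands carry different resolutions, the helper above casts the coarser one up to the finer resolution, mirroring Timestamp/Timedelta behavior. A sketch at the scalar level (the `.as_unit`/`.unit` accessors on the scalar types are assumed available, i.e. pandas >= 2.0):

import pandas as pd

a = pd.Timestamp("2024-01-01").as_unit("s")                    # second resolution
b = pd.Timestamp("2024-01-01 00:00:00.123456").as_unit("us")   # microsecond resolution

# the operation is carried out at the finer of the two resolutions
delta = b - a
print(delta)         # 0 days 00:00:00.123456
print(delta.unit)    # 'us'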
2189
+ # --------------------------------------------------------------
2190
+
2191
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
2192
+ if (
2193
+ ufunc in [np.isnan, np.isinf, np.isfinite]
2194
+ and len(inputs) == 1
2195
+ and inputs[0] is self
2196
+ ):
2197
+ # numpy 1.18 changed isinf and isnan to not raise on dt64/td64
2198
+ return getattr(ufunc, method)(self._ndarray, **kwargs)
2199
+
2200
+ return super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
2201
+
2202
+ def _round(self, freq, mode, ambiguous, nonexistent):
2203
+ # round the local times
2204
+ if isinstance(self.dtype, DatetimeTZDtype):
2205
+ # operate on naive timestamps, then convert back to aware
2206
+ self = cast("DatetimeArray", self)
2207
+ naive = self.tz_localize(None)
2208
+ result = naive._round(freq, mode, ambiguous, nonexistent)
2209
+ return result.tz_localize(
2210
+ self.tz, ambiguous=ambiguous, nonexistent=nonexistent
2211
+ )
2212
+
2213
+ values = self.view("i8")
2214
+ values = cast(np.ndarray, values)
2215
+ nanos = get_unit_for_round(freq, self._creso)
2216
+ if nanos == 0:
2217
+ # GH 52761
2218
+ return self.copy()
2219
+ result_i8 = round_nsint64(values, mode, nanos)
2220
+ result = self._maybe_mask_results(result_i8, fill_value=iNaT)
2221
+ result = result.view(self._ndarray.dtype)
2222
+ return self._simple_new(result, dtype=self.dtype)
2223
+
2224
+ @Appender((_round_doc + _round_example).format(op="round"))
2225
+ def round(
2226
+ self,
2227
+ freq,
2228
+ ambiguous: TimeAmbiguous = "raise",
2229
+ nonexistent: TimeNonexistent = "raise",
2230
+ ) -> Self:
2231
+ return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
2232
+
2233
+ @Appender((_round_doc + _floor_example).format(op="floor"))
2234
+ def floor(
2235
+ self,
2236
+ freq,
2237
+ ambiguous: TimeAmbiguous = "raise",
2238
+ nonexistent: TimeNonexistent = "raise",
2239
+ ) -> Self:
2240
+ return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
2241
+
2242
+ @Appender((_round_doc + _ceil_example).format(op="ceil"))
2243
+ def ceil(
2244
+ self,
2245
+ freq,
2246
+ ambiguous: TimeAmbiguous = "raise",
2247
+ nonexistent: TimeNonexistent = "raise",
2248
+ ) -> Self:
2249
+ return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
2250
+
2251
+ # --------------------------------------------------------------
2252
+ # Reductions
2253
+
2254
+ def any(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
2255
+ # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype
2256
+ return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
2257
+
2258
+ def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
2259
+ # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype
2260
+
2261
+ return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
2262
+
2263
+ # --------------------------------------------------------------
2264
+ # Frequency Methods
2265
+
2266
+ def _maybe_clear_freq(self) -> None:
2267
+ self._freq = None
2268
+
2269
+ def _with_freq(self, freq) -> Self:
2270
+ """
2271
+ Helper to get a view on the same data, with a new freq.
2272
+
2273
+ Parameters
2274
+ ----------
2275
+ freq : DateOffset, None, or "infer"
2276
+
2277
+ Returns
2278
+ -------
2279
+ Same type as self
2280
+ """
2281
+ # GH#29843
2282
+ if freq is None:
2283
+ # Always valid
2284
+ pass
2285
+ elif len(self) == 0 and isinstance(freq, BaseOffset):
2286
+ # Always valid. In the TimedeltaArray case, we require a Tick offset
2287
+ if self.dtype.kind == "m" and not isinstance(freq, Tick):
2288
+ raise TypeError("TimedeltaArray/Index freq must be a Tick")
2289
+ else:
2290
+ # As an internal method, we can ensure this assertion always holds
2291
+ assert freq == "infer"
2292
+ freq = to_offset(self.inferred_freq)
2293
+
2294
+ arr = self.view()
2295
+ arr._freq = freq
2296
+ return arr
2297
+
2298
+ # --------------------------------------------------------------
2299
+ # ExtensionArray Interface
2300
+
2301
+ def _values_for_json(self) -> np.ndarray:
2302
+ # Small performance bump vs the base class which calls np.asarray(self)
2303
+ if isinstance(self.dtype, np.dtype):
2304
+ return self._ndarray
2305
+ return super()._values_for_json()
2306
+
2307
+ def factorize(
2308
+ self,
2309
+ use_na_sentinel: bool = True,
2310
+ sort: bool = False,
2311
+ ):
2312
+ if self.freq is not None:
2313
+ # We must be unique, so can short-circuit (and retain freq)
2314
+ codes = np.arange(len(self), dtype=np.intp)
2315
+ uniques = self.copy() # TODO: copy or view?
2316
+ if sort and self.freq.n < 0:
2317
+ codes = codes[::-1]
2318
+ uniques = uniques[::-1]
2319
+ return codes, uniques
2320
+
2321
+ if sort:
2322
+ # algorithms.factorize only passes sort=True here when freq is
2323
+ # not None, so this should not be reached.
2324
+ raise NotImplementedError(
2325
+ f"The 'sort' keyword in {type(self).__name__}.factorize is "
2326
+ "ignored unless arr.freq is not None. To factorize with sort, "
2327
+ "call pd.factorize(obj, sort=True) instead."
2328
+ )
2329
+ return super().factorize(use_na_sentinel=use_na_sentinel)
2330
+
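Because an array with a pinned freq is necessarily unique and evenly spaced, the short-circuit above simply enumerates positions. A sketch of the visible behavior through the public factorize entry point:

import pandas as pd

dti = pd.date_range("2024-01-01", periods=3, freq="D")   # freq is set
codes, uniques = pd.factorize(dti)
print(codes)    # [0 1 2] -- one code per position, no hashing needed
print(uniques)  # DatetimeIndex(['2024-01-01', '2024-01-02', '2024-01-03'], ...)

# without a freq, the generic hash-based path (the super() call) is used
vals = pd.DatetimeIndex(["2024-01-02", "2024-01-01", "2024-01-02"])
codes, uniques = pd.factorize(vals)
print(codes)    # [0 1 0]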
2331
+ @classmethod
2332
+ def _concat_same_type(
2333
+ cls,
2334
+ to_concat: Sequence[Self],
2335
+ axis: AxisInt = 0,
2336
+ ) -> Self:
2337
+ new_obj = super()._concat_same_type(to_concat, axis)
2338
+
2339
+ obj = to_concat[0]
2340
+
2341
+ if axis == 0:
2342
+ # GH 3232: If the concat result is evenly spaced, we can retain the
2343
+ # original frequency
2344
+ to_concat = [x for x in to_concat if len(x)]
2345
+
2346
+ if obj.freq is not None and all(x.freq == obj.freq for x in to_concat):
2347
+ pairs = zip(to_concat[:-1], to_concat[1:])
2348
+ if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs):
2349
+ new_freq = obj.freq
2350
+ new_obj._freq = new_freq
2351
+ return new_obj
2352
+
2353
+ def copy(self, order: str = "C") -> Self:
2354
+ new_obj = super().copy(order=order)
2355
+ new_obj._freq = self.freq
2356
+ return new_obj
2357
+
2358
+ def interpolate(
2359
+ self,
2360
+ *,
2361
+ method: InterpolateOptions,
2362
+ axis: int,
2363
+ index: Index,
2364
+ limit,
2365
+ limit_direction,
2366
+ limit_area,
2367
+ copy: bool,
2368
+ **kwargs,
2369
+ ) -> Self:
2370
+ """
2371
+ See NDFrame.interpolate.__doc__.
2372
+ """
2373
+ # NB: we return type(self) even if copy=False
2374
+ if method != "linear":
2375
+ raise NotImplementedError
2376
+
2377
+ if not copy:
2378
+ out_data = self._ndarray
2379
+ else:
2380
+ out_data = self._ndarray.copy()
2381
+
2382
+ missing.interpolate_2d_inplace(
2383
+ out_data,
2384
+ method=method,
2385
+ axis=axis,
2386
+ index=index,
2387
+ limit=limit,
2388
+ limit_direction=limit_direction,
2389
+ limit_area=limit_area,
2390
+ **kwargs,
2391
+ )
2392
+ if not copy:
2393
+ return self
2394
+ return type(self)._simple_new(out_data, dtype=self.dtype)
2395
+
2396
+ # --------------------------------------------------------------
2397
+ # Unsorted
2398
+
2399
+ @property
2400
+ def _is_dates_only(self) -> bool:
2401
+ """
2402
+ Check whether the values are round times at midnight (and have no timezone),
2403
+ in which case they get a more compact __repr__ than other cases. For
2404
+ TimedeltaArray we check for multiples of 24 hours.
2405
+ """
2406
+ if not lib.is_np_dtype(self.dtype):
2407
+ # i.e. we have a timezone
2408
+ return False
2409
+
2410
+ values_int = self.asi8
2411
+ consider_values = values_int != iNaT
2412
+ reso = get_unit_from_dtype(self.dtype)
2413
+ ppd = periods_per_day(reso)
2414
+
2415
+ # TODO: can we reuse is_date_array_normalized? would need a skipna kwd
2416
+ # (first attempt at this was less performant than this implementation)
2417
+ even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0
2418
+ return even_days
2419
+
2420
+
2421
+ # -------------------------------------------------------------------
2422
+ # Shared Constructor Helpers
2423
+
2424
+
2425
+ def ensure_arraylike_for_datetimelike(
2426
+ data, copy: bool, cls_name: str
2427
+ ) -> tuple[ArrayLike, bool]:
2428
+ if not hasattr(data, "dtype"):
2429
+ # e.g. list, tuple
2430
+ if not isinstance(data, (list, tuple)) and np.ndim(data) == 0:
2431
+ # i.e. generator
2432
+ data = list(data)
2433
+
2434
+ data = construct_1d_object_array_from_listlike(data)
2435
+ copy = False
2436
+ elif isinstance(data, ABCMultiIndex):
2437
+ raise TypeError(f"Cannot create a {cls_name} from a MultiIndex.")
2438
+ else:
2439
+ data = extract_array(data, extract_numpy=True)
2440
+
2441
+ if isinstance(data, IntegerArray) or (
2442
+ isinstance(data, ArrowExtensionArray) and data.dtype.kind in "iu"
2443
+ ):
2444
+ data = data.to_numpy("int64", na_value=iNaT)
2445
+ copy = False
2446
+ elif isinstance(data, ArrowExtensionArray):
2447
+ data = data._maybe_convert_datelike_array()
2448
+ data = data.to_numpy()
2449
+ copy = False
2450
+ elif not isinstance(data, (np.ndarray, ExtensionArray)):
2451
+ # GH#24539 e.g. xarray, dask object
2452
+ data = np.asarray(data)
2453
+
2454
+ elif isinstance(data, ABCCategorical):
2455
+ # GH#18664 preserve tz in going DTI->Categorical->DTI
2456
+ # TODO: cases where we need to do another pass through maybe_convert_dtype,
2457
+ # e.g. the categories are timedelta64s
2458
+ data = data.categories.take(data.codes, fill_value=NaT)._values
2459
+ copy = False
2460
+
2461
+ return data, copy
2462
+
2463
+
2464
+ @overload
2465
+ def validate_periods(periods: None) -> None:
2466
+ ...
2467
+
2468
+
2469
+ @overload
2470
+ def validate_periods(periods: int | float) -> int:
2471
+ ...
2472
+
2473
+
2474
+ def validate_periods(periods: int | float | None) -> int | None:
2475
+ """
2476
+ If a `periods` argument is passed to the Datetime/Timedelta Array/Index
2477
+ constructor, cast it to an integer.
2478
+
2479
+ Parameters
2480
+ ----------
2481
+ periods : None, float, int
2482
+
2483
+ Returns
2484
+ -------
2485
+ periods : None or int
2486
+
2487
+ Raises
2488
+ ------
2489
+ TypeError
2490
+ if periods is neither None nor a number
2491
+ """
2492
+ if periods is not None:
2493
+ if lib.is_float(periods):
2494
+ warnings.warn(
2495
+ # GH#56036
2496
+ "Non-integer 'periods' in pd.date_range, pd.timedelta_range, "
2497
+ "pd.period_range, and pd.interval_range are deprecated and "
2498
+ "will raise in a future version.",
2499
+ FutureWarning,
2500
+ stacklevel=find_stack_level(),
2501
+ )
2502
+ periods = int(periods)
2503
+ elif not lib.is_integer(periods):
2504
+ raise TypeError(f"periods must be a number, got {periods}")
2505
+ return periods
2506
+
2507
+
2508
+ def _validate_inferred_freq(
2509
+ freq: BaseOffset | None, inferred_freq: BaseOffset | None
2510
+ ) -> BaseOffset | None:
2511
+ """
2512
+ If the user passes a freq and another freq is inferred from passed data,
2513
+ require that they match.
2514
+
2515
+ Parameters
2516
+ ----------
2517
+ freq : DateOffset or None
2518
+ inferred_freq : DateOffset or None
2519
+
2520
+ Returns
2521
+ -------
2522
+ freq : DateOffset or None
2523
+ """
2524
+ if inferred_freq is not None:
2525
+ if freq is not None and freq != inferred_freq:
2526
+ raise ValueError(
2527
+ f"Inferred frequency {inferred_freq} from passed "
2528
+ "values does not conform to passed frequency "
2529
+ f"{freq.freqstr}"
2530
+ )
2531
+ if freq is None:
2532
+ freq = inferred_freq
2533
+
2534
+ return freq
2535
+
2536
+
2537
+ def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype | ArrowDtype) -> str:
2538
+ """
2539
+ Return the unit str corresponding to the dtype's resolution.
2540
+
2541
+ Parameters
2542
+ ----------
2543
+ dtype : DatetimeTZDtype or np.dtype
2544
+ If np.dtype, we assume it is a datetime64 dtype.
2545
+
2546
+ Returns
2547
+ -------
2548
+ str
2549
+ """
2550
+ if isinstance(dtype, DatetimeTZDtype):
2551
+ return dtype.unit
2552
+ elif isinstance(dtype, ArrowDtype):
2553
+ if dtype.kind not in "mM":
2554
+ raise ValueError(f"{dtype=} does not have a resolution.")
2555
+ return dtype.pyarrow_dtype.unit
2556
+ return np.datetime_data(dtype)[0]
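`dtype_to_unit` lives in a private module, but its branches map onto public facts; a short sketch of the equivalent lookups (the ArrowDtype branch is omitted since it additionally assumes pyarrow is installed):

import numpy as np
from pandas import DatetimeTZDtype

# numpy datetime64/timedelta64 dtypes: the unit comes from np.datetime_data
print(np.datetime_data(np.dtype("datetime64[ms]"))[0])   # 'ms'
print(np.datetime_data(np.dtype("timedelta64[s]"))[0])   # 's'

# tz-aware pandas dtype: the unit is stored on the dtype itself
print(DatetimeTZDtype(tz="UTC", unit="us").unit)         # 'us'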
venv/lib/python3.10/site-packages/pandas/core/arrays/datetimes.py ADDED
@@ -0,0 +1,2820 @@
1
+ from __future__ import annotations
2
+
3
+ from datetime import (
4
+ datetime,
5
+ timedelta,
6
+ tzinfo,
7
+ )
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ cast,
11
+ overload,
12
+ )
13
+ import warnings
14
+
15
+ import numpy as np
16
+
17
+ from pandas._libs import (
18
+ lib,
19
+ tslib,
20
+ )
21
+ from pandas._libs.tslibs import (
22
+ BaseOffset,
23
+ NaT,
24
+ NaTType,
25
+ Resolution,
26
+ Timestamp,
27
+ astype_overflowsafe,
28
+ fields,
29
+ get_resolution,
30
+ get_supported_dtype,
31
+ get_unit_from_dtype,
32
+ ints_to_pydatetime,
33
+ is_date_array_normalized,
34
+ is_supported_dtype,
35
+ is_unitless,
36
+ normalize_i8_timestamps,
37
+ timezones,
38
+ to_offset,
39
+ tz_convert_from_utc,
40
+ tzconversion,
41
+ )
42
+ from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit
43
+ from pandas.errors import PerformanceWarning
44
+ from pandas.util._exceptions import find_stack_level
45
+ from pandas.util._validators import validate_inclusive
46
+
47
+ from pandas.core.dtypes.common import (
48
+ DT64NS_DTYPE,
49
+ INT64_DTYPE,
50
+ is_bool_dtype,
51
+ is_float_dtype,
52
+ is_string_dtype,
53
+ pandas_dtype,
54
+ )
55
+ from pandas.core.dtypes.dtypes import (
56
+ DatetimeTZDtype,
57
+ ExtensionDtype,
58
+ PeriodDtype,
59
+ )
60
+ from pandas.core.dtypes.missing import isna
61
+
62
+ from pandas.core.arrays import datetimelike as dtl
63
+ from pandas.core.arrays._ranges import generate_regular_range
64
+ import pandas.core.common as com
65
+
66
+ from pandas.tseries.frequencies import get_period_alias
67
+ from pandas.tseries.offsets import (
68
+ Day,
69
+ Tick,
70
+ )
71
+
72
+ if TYPE_CHECKING:
73
+ from collections.abc import Iterator
74
+
75
+ from pandas._typing import (
76
+ ArrayLike,
77
+ DateTimeErrorChoices,
78
+ DtypeObj,
79
+ IntervalClosedType,
80
+ Self,
81
+ TimeAmbiguous,
82
+ TimeNonexistent,
83
+ npt,
84
+ )
85
+
86
+ from pandas import DataFrame
87
+ from pandas.core.arrays import PeriodArray
88
+
89
+
90
+ _ITER_CHUNKSIZE = 10_000
91
+
92
+
93
+ @overload
94
+ def tz_to_dtype(tz: tzinfo, unit: str = ...) -> DatetimeTZDtype:
95
+ ...
96
+
97
+
98
+ @overload
99
+ def tz_to_dtype(tz: None, unit: str = ...) -> np.dtype[np.datetime64]:
100
+ ...
101
+
102
+
103
+ def tz_to_dtype(
104
+ tz: tzinfo | None, unit: str = "ns"
105
+ ) -> np.dtype[np.datetime64] | DatetimeTZDtype:
106
+ """
107
+ Return a datetime64[ns] dtype appropriate for the given timezone.
108
+
109
+ Parameters
110
+ ----------
111
+ tz : tzinfo or None
112
+ unit : str, default "ns"
113
+
114
+ Returns
115
+ -------
116
+ np.dtype or Datetime64TZDType
117
+ """
118
+ if tz is None:
119
+ return np.dtype(f"M8[{unit}]")
120
+ else:
121
+ return DatetimeTZDtype(tz=tz, unit=unit)
122
+
123
+
124
+ def _field_accessor(name: str, field: str, docstring: str | None = None):
125
+ def f(self):
126
+ values = self._local_timestamps()
127
+
128
+ if field in self._bool_ops:
129
+ result: np.ndarray
130
+
131
+ if field.endswith(("start", "end")):
132
+ freq = self.freq
133
+ month_kw = 12
134
+ if freq:
135
+ kwds = freq.kwds
136
+ month_kw = kwds.get("startingMonth", kwds.get("month", 12))
137
+
138
+ result = fields.get_start_end_field(
139
+ values, field, self.freqstr, month_kw, reso=self._creso
140
+ )
141
+ else:
142
+ result = fields.get_date_field(values, field, reso=self._creso)
143
+
144
+ # these return a boolean by-definition
145
+ return result
146
+
147
+ if field in self._object_ops:
148
+ result = fields.get_date_name_field(values, field, reso=self._creso)
149
+ result = self._maybe_mask_results(result, fill_value=None)
150
+
151
+ else:
152
+ result = fields.get_date_field(values, field, reso=self._creso)
153
+ result = self._maybe_mask_results(
154
+ result, fill_value=None, convert="float64"
155
+ )
156
+
157
+ return result
158
+
159
+ f.__name__ = name
160
+ f.__doc__ = docstring
161
+ return property(f)
162
+
163
+
164
+ # error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
165
+ # incompatible with definition in base class "ExtensionArray"
166
+ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): # type: ignore[misc]
167
+ """
168
+ Pandas ExtensionArray for tz-naive or tz-aware datetime data.
169
+
170
+ .. warning::
171
+
172
+ DatetimeArray is currently experimental, and its API may change
173
+ without warning. In particular, :attr:`DatetimeArray.dtype` is
174
+ expected to change to always be an instance of an ``ExtensionDtype``
175
+ subclass.
176
+
177
+ Parameters
178
+ ----------
179
+ values : Series, Index, DatetimeArray, ndarray
180
+ The datetime data.
181
+
182
+ For DatetimeArray `values` (or a Series or Index boxing one),
183
+ `dtype` and `freq` will be extracted from `values`.
184
+
185
+ dtype : numpy.dtype or DatetimeTZDtype
186
+ Note that the only NumPy dtype allowed is 'datetime64[ns]'.
187
+ freq : str or Offset, optional
188
+ The frequency.
189
+ copy : bool, default False
190
+ Whether to copy the underlying array of values.
191
+
192
+ Attributes
193
+ ----------
194
+ None
195
+
196
+ Methods
197
+ -------
198
+ None
199
+
200
+ Examples
201
+ --------
202
+ >>> pd.arrays.DatetimeArray._from_sequence(
203
+ ... pd.DatetimeIndex(['2023-01-01', '2023-01-02'], freq='D'))
204
+ <DatetimeArray>
205
+ ['2023-01-01 00:00:00', '2023-01-02 00:00:00']
206
+ Length: 2, dtype: datetime64[ns]
207
+ """
208
+
209
+ _typ = "datetimearray"
210
+ _internal_fill_value = np.datetime64("NaT", "ns")
211
+ _recognized_scalars = (datetime, np.datetime64)
212
+ _is_recognized_dtype = lambda x: lib.is_np_dtype(x, "M") or isinstance(
213
+ x, DatetimeTZDtype
214
+ )
215
+ _infer_matches = ("datetime", "datetime64", "date")
216
+
217
+ @property
218
+ def _scalar_type(self) -> type[Timestamp]:
219
+ return Timestamp
220
+
221
+ # define my properties & methods for delegation
222
+ _bool_ops: list[str] = [
223
+ "is_month_start",
224
+ "is_month_end",
225
+ "is_quarter_start",
226
+ "is_quarter_end",
227
+ "is_year_start",
228
+ "is_year_end",
229
+ "is_leap_year",
230
+ ]
231
+ _object_ops: list[str] = ["freq", "tz"]
232
+ _field_ops: list[str] = [
233
+ "year",
234
+ "month",
235
+ "day",
236
+ "hour",
237
+ "minute",
238
+ "second",
239
+ "weekday",
240
+ "dayofweek",
241
+ "day_of_week",
242
+ "dayofyear",
243
+ "day_of_year",
244
+ "quarter",
245
+ "days_in_month",
246
+ "daysinmonth",
247
+ "microsecond",
248
+ "nanosecond",
249
+ ]
250
+ _other_ops: list[str] = ["date", "time", "timetz"]
251
+ _datetimelike_ops: list[str] = (
252
+ _field_ops + _object_ops + _bool_ops + _other_ops + ["unit"]
253
+ )
254
+ _datetimelike_methods: list[str] = [
255
+ "to_period",
256
+ "tz_localize",
257
+ "tz_convert",
258
+ "normalize",
259
+ "strftime",
260
+ "round",
261
+ "floor",
262
+ "ceil",
263
+ "month_name",
264
+ "day_name",
265
+ "as_unit",
266
+ ]
267
+
268
+ # ndim is inherited from ExtensionArray, must exist to ensure
269
+ # Timestamp.__richcmp__(DateTimeArray) operates pointwise
270
+
271
+ # ensure that operations with numpy arrays defer to our implementation
272
+ __array_priority__ = 1000
273
+
274
+ # -----------------------------------------------------------------
275
+ # Constructors
276
+
277
+ _dtype: np.dtype[np.datetime64] | DatetimeTZDtype
278
+ _freq: BaseOffset | None = None
279
+ _default_dtype = DT64NS_DTYPE # used in TimeLikeOps.__init__
280
+
281
+ @classmethod
282
+ def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self:
283
+ if lib.infer_dtype(scalars, skipna=True) not in ["datetime", "datetime64"]:
284
+ # TODO: require any NAs be valid-for-DTA
285
+ # TODO: if dtype is passed, check for tzawareness compat?
286
+ raise ValueError
287
+ return cls._from_sequence(scalars, dtype=dtype)
288
+
289
+ @classmethod
290
+ def _validate_dtype(cls, values, dtype):
291
+ # used in TimeLikeOps.__init__
292
+ dtype = _validate_dt64_dtype(dtype)
293
+ _validate_dt64_dtype(values.dtype)
294
+ if isinstance(dtype, np.dtype):
295
+ if values.dtype != dtype:
296
+ raise ValueError("Values resolution does not match dtype.")
297
+ else:
298
+ vunit = np.datetime_data(values.dtype)[0]
299
+ if vunit != dtype.unit:
300
+ raise ValueError("Values resolution does not match dtype.")
301
+ return dtype
302
+
303
+ # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
304
+ @classmethod
305
+ def _simple_new( # type: ignore[override]
306
+ cls,
307
+ values: npt.NDArray[np.datetime64],
308
+ freq: BaseOffset | None = None,
309
+ dtype: np.dtype[np.datetime64] | DatetimeTZDtype = DT64NS_DTYPE,
310
+ ) -> Self:
311
+ assert isinstance(values, np.ndarray)
312
+ assert dtype.kind == "M"
313
+ if isinstance(dtype, np.dtype):
314
+ assert dtype == values.dtype
315
+ assert not is_unitless(dtype)
316
+ else:
317
+ # DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC],
318
+ # then values.dtype should be M8[us].
319
+ assert dtype._creso == get_unit_from_dtype(values.dtype)
320
+
321
+ result = super()._simple_new(values, dtype)
322
+ result._freq = freq
323
+ return result
324
+
325
+ @classmethod
326
+ def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
327
+ return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
328
+
329
+ @classmethod
330
+ def _from_sequence_not_strict(
331
+ cls,
332
+ data,
333
+ *,
334
+ dtype=None,
335
+ copy: bool = False,
336
+ tz=lib.no_default,
337
+ freq: str | BaseOffset | lib.NoDefault | None = lib.no_default,
338
+ dayfirst: bool = False,
339
+ yearfirst: bool = False,
340
+ ambiguous: TimeAmbiguous = "raise",
341
+ ) -> Self:
342
+ """
343
+ A non-strict version of _from_sequence, called from DatetimeIndex.__new__.
344
+ """
345
+
346
+ # if the user either explicitly passes tz=None or a tz-naive dtype, we
347
+ # disallows inferring a tz.
348
+ explicit_tz_none = tz is None
349
+ if tz is lib.no_default:
350
+ tz = None
351
+ else:
352
+ tz = timezones.maybe_get_tz(tz)
353
+
354
+ dtype = _validate_dt64_dtype(dtype)
355
+ # if dtype has an embedded tz, capture it
356
+ tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none)
357
+
358
+ unit = None
359
+ if dtype is not None:
360
+ unit = dtl.dtype_to_unit(dtype)
361
+
362
+ data, copy = dtl.ensure_arraylike_for_datetimelike(
363
+ data, copy, cls_name="DatetimeArray"
364
+ )
365
+ inferred_freq = None
366
+ if isinstance(data, DatetimeArray):
367
+ inferred_freq = data.freq
368
+
369
+ subarr, tz = _sequence_to_dt64(
370
+ data,
371
+ copy=copy,
372
+ tz=tz,
373
+ dayfirst=dayfirst,
374
+ yearfirst=yearfirst,
375
+ ambiguous=ambiguous,
376
+ out_unit=unit,
377
+ )
378
+ # We have to call this again after possibly inferring a tz above
379
+ _validate_tz_from_dtype(dtype, tz, explicit_tz_none)
380
+ if tz is not None and explicit_tz_none:
381
+ raise ValueError(
382
+ "Passed data is timezone-aware, incompatible with 'tz=None'. "
383
+ "Use obj.tz_localize(None) instead."
384
+ )
385
+
386
+ data_unit = np.datetime_data(subarr.dtype)[0]
387
+ data_dtype = tz_to_dtype(tz, data_unit)
388
+ result = cls._simple_new(subarr, freq=inferred_freq, dtype=data_dtype)
389
+ if unit is not None and unit != result.unit:
390
+ # If unit was specified in user-passed dtype, cast to it here
391
+ result = result.as_unit(unit)
392
+
393
+ validate_kwds = {"ambiguous": ambiguous}
394
+ result._maybe_pin_freq(freq, validate_kwds)
395
+ return result
396
+
397
+ @classmethod
398
+ def _generate_range(
399
+ cls,
400
+ start,
401
+ end,
402
+ periods: int | None,
403
+ freq,
404
+ tz=None,
405
+ normalize: bool = False,
406
+ ambiguous: TimeAmbiguous = "raise",
407
+ nonexistent: TimeNonexistent = "raise",
408
+ inclusive: IntervalClosedType = "both",
409
+ *,
410
+ unit: str | None = None,
411
+ ) -> Self:
412
+ periods = dtl.validate_periods(periods)
413
+ if freq is None and any(x is None for x in [periods, start, end]):
414
+ raise ValueError("Must provide freq argument if no data is supplied")
415
+
416
+ if com.count_not_none(start, end, periods, freq) != 3:
417
+ raise ValueError(
418
+ "Of the four parameters: start, end, periods, "
419
+ "and freq, exactly three must be specified"
420
+ )
421
+ freq = to_offset(freq)
422
+
423
+ if start is not None:
424
+ start = Timestamp(start)
425
+
426
+ if end is not None:
427
+ end = Timestamp(end)
428
+
429
+ if start is NaT or end is NaT:
430
+ raise ValueError("Neither `start` nor `end` can be NaT")
431
+
432
+ if unit is not None:
433
+ if unit not in ["s", "ms", "us", "ns"]:
434
+ raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'")
435
+ else:
436
+ unit = "ns"
437
+
438
+ if start is not None:
439
+ start = start.as_unit(unit, round_ok=False)
440
+ if end is not None:
441
+ end = end.as_unit(unit, round_ok=False)
442
+
443
+ left_inclusive, right_inclusive = validate_inclusive(inclusive)
444
+ start, end = _maybe_normalize_endpoints(start, end, normalize)
445
+ tz = _infer_tz_from_endpoints(start, end, tz)
446
+
447
+ if tz is not None:
448
+ # Localize the start and end arguments
449
+ start = _maybe_localize_point(start, freq, tz, ambiguous, nonexistent)
450
+ end = _maybe_localize_point(end, freq, tz, ambiguous, nonexistent)
451
+
452
+ if freq is not None:
453
+ # We break Day arithmetic (fixed 24 hour) here and opt for
454
+ # Day to mean calendar day (23/24/25 hour). Therefore, strip
455
+ # tz info from start and end to avoid DST arithmetic
456
+ if isinstance(freq, Day):
457
+ if start is not None:
458
+ start = start.tz_localize(None)
459
+ if end is not None:
460
+ end = end.tz_localize(None)
461
+
462
+ if isinstance(freq, Tick):
463
+ i8values = generate_regular_range(start, end, periods, freq, unit=unit)
464
+ else:
465
+ xdr = _generate_range(
466
+ start=start, end=end, periods=periods, offset=freq, unit=unit
467
+ )
468
+ i8values = np.array([x._value for x in xdr], dtype=np.int64)
469
+
470
+ endpoint_tz = start.tz if start is not None else end.tz
471
+
472
+ if tz is not None and endpoint_tz is None:
473
+ if not timezones.is_utc(tz):
474
+ # skip tz_localize_to_utc when tz is UTC, where it would be
475
+ # a no-op that still makes an unnecessary copy.
476
+ creso = abbrev_to_npy_unit(unit)
477
+ i8values = tzconversion.tz_localize_to_utc(
478
+ i8values,
479
+ tz,
480
+ ambiguous=ambiguous,
481
+ nonexistent=nonexistent,
482
+ creso=creso,
483
+ )
484
+
485
+ # i8values is localized datetime64 array -> have to convert
486
+ # start/end as well to compare
487
+ if start is not None:
488
+ start = start.tz_localize(tz, ambiguous, nonexistent)
489
+ if end is not None:
490
+ end = end.tz_localize(tz, ambiguous, nonexistent)
491
+ else:
492
+ # Create a linearly spaced date_range in local time
493
+ # Nanosecond-granularity timestamps aren't always correctly
494
+ # representable with doubles, so we limit the range that we
495
+ # pass to np.linspace as much as possible
496
+ periods = cast(int, periods)
497
+ i8values = (
498
+ np.linspace(0, end._value - start._value, periods, dtype="int64")
499
+ + start._value
500
+ )
501
+ if i8values.dtype != "i8":
502
+ # 2022-01-09 I (brock) am not sure if it is possible for this
503
+ # to overflow and cast to e.g. f8, but if it does we need to cast
504
+ i8values = i8values.astype("i8")
505
+
506
+ if start == end:
507
+ if not left_inclusive and not right_inclusive:
508
+ i8values = i8values[1:-1]
509
+ else:
510
+ start_i8 = Timestamp(start)._value
511
+ end_i8 = Timestamp(end)._value
512
+ if not left_inclusive or not right_inclusive:
513
+ if not left_inclusive and len(i8values) and i8values[0] == start_i8:
514
+ i8values = i8values[1:]
515
+ if not right_inclusive and len(i8values) and i8values[-1] == end_i8:
516
+ i8values = i8values[:-1]
517
+
518
+ dt64_values = i8values.view(f"datetime64[{unit}]")
519
+ dtype = tz_to_dtype(tz, unit=unit)
520
+ return cls._simple_new(dt64_values, freq=freq, dtype=dtype)
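+
+ # --- illustrative sketch (not part of the pandas source; assumes a standard
+ # pandas install) --- _generate_range is the engine behind pd.date_range:
+ #
+ #     import pandas as pd
+ #     pd.date_range("2024-01-01", periods=3, freq="D", tz="UTC")
+ #     pd.date_range(start="2024-01-01", end="2024-01-02", periods=5)  # freq=None
+ #
+ # A Tick frequency takes the generate_regular_range branch; freq=None takes
+ # the np.linspace branch shown above.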
521
+
522
+ # -----------------------------------------------------------------
523
+ # DatetimeLike Interface
524
+
525
+ def _unbox_scalar(self, value) -> np.datetime64:
526
+ if not isinstance(value, self._scalar_type) and value is not NaT:
527
+ raise ValueError("'value' should be a Timestamp.")
528
+ self._check_compatible_with(value)
529
+ if value is NaT:
530
+ return np.datetime64(value._value, self.unit)
531
+ else:
532
+ return value.as_unit(self.unit).asm8
533
+
534
+ def _scalar_from_string(self, value) -> Timestamp | NaTType:
535
+ return Timestamp(value, tz=self.tz)
536
+
537
+ def _check_compatible_with(self, other) -> None:
538
+ if other is NaT:
539
+ return
540
+ self._assert_tzawareness_compat(other)
541
+
542
+ # -----------------------------------------------------------------
543
+ # Descriptive Properties
544
+
545
+ def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
546
+ # GH#42228
547
+ value = x.view("i8")
548
+ ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz)
549
+ return ts
550
+
551
+ @property
552
+ # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
553
+ # incompatible with return type "ExtensionDtype" in supertype
554
+ # "ExtensionArray"
555
+ def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: # type: ignore[override]
556
+ """
557
+ The dtype for the DatetimeArray.
558
+
559
+ .. warning::
560
+
561
+ A future version of pandas will change dtype to never be a
562
+ ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
563
+ always be an instance of an ``ExtensionDtype`` subclass.
564
+
565
+ Returns
566
+ -------
567
+ numpy.dtype or DatetimeTZDtype
568
+ If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
569
+ is returned.
570
+
571
+ If the values are tz-aware, then the ``DatetimeTZDtype``
572
+ is returned.
573
+ """
574
+ return self._dtype
575
+
576
+ @property
577
+ def tz(self) -> tzinfo | None:
578
+ """
579
+ Return the timezone.
580
+
581
+ Returns
582
+ -------
583
+ datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
584
+ Returns None when the array is tz-naive.
585
+
586
+ Examples
587
+ --------
588
+ For Series:
589
+
590
+ >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
591
+ >>> s = pd.to_datetime(s)
592
+ >>> s
593
+ 0 2020-01-01 10:00:00+00:00
594
+ 1 2020-02-01 11:00:00+00:00
595
+ dtype: datetime64[ns, UTC]
596
+ >>> s.dt.tz
597
+ datetime.timezone.utc
598
+
599
+ For DatetimeIndex:
600
+
601
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
602
+ ... "2/1/2020 11:00:00+00:00"])
603
+ >>> idx.tz
604
+ datetime.timezone.utc
605
+ """
606
+ # GH 18595
607
+ return getattr(self.dtype, "tz", None)
608
+
609
+ @tz.setter
610
+ def tz(self, value):
611
+ # GH 3746: Prevent localizing or converting the index by setting tz
612
+ raise AttributeError(
613
+ "Cannot directly set timezone. Use tz_localize() "
614
+ "or tz_convert() as appropriate"
615
+ )
616
+
617
+ @property
618
+ def tzinfo(self) -> tzinfo | None:
619
+ """
620
+ Alias for tz attribute
621
+ """
622
+ return self.tz
623
+
624
+ @property # NB: override with cache_readonly in immutable subclasses
625
+ def is_normalized(self) -> bool:
626
+ """
627
+ Returns True if all of the dates are at midnight ("no time")
628
+ """
629
+ return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)
630
+
631
+ @property # NB: override with cache_readonly in immutable subclasses
632
+ def _resolution_obj(self) -> Resolution:
633
+ return get_resolution(self.asi8, self.tz, reso=self._creso)
634
+
635
+ # ----------------------------------------------------------------
636
+ # Array-Like / EA-Interface Methods
637
+
638
+ def __array__(self, dtype=None, copy=None) -> np.ndarray:
639
+ if dtype is None and self.tz:
640
+ # The default for tz-aware is object, to preserve tz info
641
+ dtype = object
642
+
643
+ return super().__array__(dtype=dtype, copy=copy)
644
+
645
+ def __iter__(self) -> Iterator:
646
+ """
647
+ Return an iterator over the boxed values
648
+
649
+ Yields
650
+ ------
651
+ tstamp : Timestamp
652
+ """
653
+ if self.ndim > 1:
654
+ for i in range(len(self)):
655
+ yield self[i]
656
+ else:
657
+ # convert in chunks of 10k for efficiency
658
+ data = self.asi8
659
+ length = len(self)
660
+ chunksize = _ITER_CHUNKSIZE
661
+ chunks = (length // chunksize) + 1
662
+
663
+ for i in range(chunks):
664
+ start_i = i * chunksize
665
+ end_i = min((i + 1) * chunksize, length)
666
+ converted = ints_to_pydatetime(
667
+ data[start_i:end_i],
668
+ tz=self.tz,
669
+ box="timestamp",
670
+ reso=self._creso,
671
+ )
672
+ yield from converted
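+
+ # --- illustrative sketch (not part of the pandas source) --- iteration boxes
+ # each underlying i8 value into a Timestamp; the chunking above only bounds
+ # how much work each ints_to_pydatetime call does:
+ #
+ #     import pandas as pd
+ #     dti = pd.date_range("2020-01-01", periods=3, tz="UTC")
+ #     [type(x).__name__ for x in dti]   # ['Timestamp', 'Timestamp', 'Timestamp']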
673
+
674
+ def astype(self, dtype, copy: bool = True):
675
+ # We handle
676
+ # --> datetime
677
+ # --> period
678
+ # DatetimeLikeArrayMixin Super handles the rest.
679
+ dtype = pandas_dtype(dtype)
680
+
681
+ if dtype == self.dtype:
682
+ if copy:
683
+ return self.copy()
684
+ return self
685
+
686
+ elif isinstance(dtype, ExtensionDtype):
687
+ if not isinstance(dtype, DatetimeTZDtype):
688
+ # e.g. Sparse[datetime64[ns]]
689
+ return super().astype(dtype, copy=copy)
690
+ elif self.tz is None:
691
+ # pre-2.0 this did self.tz_localize(dtype.tz), which did not match
692
+ # the Series behavior which did
693
+ # values.tz_localize("UTC").tz_convert(dtype.tz)
694
+ raise TypeError(
695
+ "Cannot use .astype to convert from timezone-naive dtype to "
696
+ "timezone-aware dtype. Use obj.tz_localize instead or "
697
+ "series.dt.tz_localize instead"
698
+ )
699
+ else:
700
+ # tzaware unit conversion e.g. datetime64[s, UTC]
701
+ np_dtype = np.dtype(dtype.str)
702
+ res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy)
703
+ return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq)
704
+
705
+ elif (
706
+ self.tz is None
707
+ and lib.is_np_dtype(dtype, "M")
708
+ and not is_unitless(dtype)
709
+ and is_supported_dtype(dtype)
710
+ ):
711
+ # unit conversion e.g. datetime64[s]
712
+ res_values = astype_overflowsafe(self._ndarray, dtype, copy=True)
713
+ return type(self)._simple_new(res_values, dtype=res_values.dtype)
714
+ # TODO: preserve freq?
715
+
716
+ elif self.tz is not None and lib.is_np_dtype(dtype, "M"):
717
+ # pre-2.0 behavior for DTA/DTI was
718
+ # values.tz_convert("UTC").tz_localize(None), which did not match
719
+ # the Series behavior
720
+ raise TypeError(
721
+ "Cannot use .astype to convert from timezone-aware dtype to "
722
+ "timezone-naive dtype. Use obj.tz_localize(None) or "
723
+ "obj.tz_convert('UTC').tz_localize(None) instead."
724
+ )
725
+
726
+ elif (
727
+ self.tz is None
728
+ and lib.is_np_dtype(dtype, "M")
729
+ and dtype != self.dtype
730
+ and is_unitless(dtype)
731
+ ):
732
+ raise TypeError(
733
+ "Casting to unit-less dtype 'datetime64' is not supported. "
734
+ "Pass e.g. 'datetime64[ns]' instead."
735
+ )
736
+
737
+ elif isinstance(dtype, PeriodDtype):
738
+ return self.to_period(freq=dtype.freq)
739
+ return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
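+
+ # --- illustrative sketch (not part of the pandas source; assumes a standard
+ # pandas install) --- the main astype branches above, seen from the public API:
+ #
+ #     import pandas as pd
+ #     dti = pd.date_range("2020-01-01", periods=2)
+ #     dti.astype("datetime64[s]")          # unit conversion: allowed
+ #     dti.astype("datetime64[ns, UTC]")    # naive -> aware: raises TypeError
+ #     dti.tz_localize("UTC").astype("datetime64[ns]")  # aware -> naive: TypeError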
740
+
741
+ # -----------------------------------------------------------------
742
+ # Rendering Methods
743
+
744
+ def _format_native_types(
745
+ self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
746
+ ) -> npt.NDArray[np.object_]:
747
+ if date_format is None and self._is_dates_only:
748
+ # Only dates and no timezone: provide a default format
749
+ date_format = "%Y-%m-%d"
750
+
751
+ return tslib.format_array_from_datetime(
752
+ self.asi8, tz=self.tz, format=date_format, na_rep=na_rep, reso=self._creso
753
+ )
754
+
755
+ # -----------------------------------------------------------------
756
+ # Comparison Methods
757
+
758
+ def _has_same_tz(self, other) -> bool:
759
+ # vzone shouldn't be None if value is non-datetime like
760
+ if isinstance(other, np.datetime64):
761
+ # convert to Timestamp as np.datetime64 doesn't have tz attr
762
+ other = Timestamp(other)
763
+
764
+ if not hasattr(other, "tzinfo"):
765
+ return False
766
+ other_tz = other.tzinfo
767
+ return timezones.tz_compare(self.tzinfo, other_tz)
768
+
769
+ def _assert_tzawareness_compat(self, other) -> None:
770
+ # adapted from _Timestamp._assert_tzawareness_compat
771
+ other_tz = getattr(other, "tzinfo", None)
772
+ other_dtype = getattr(other, "dtype", None)
773
+
774
+ if isinstance(other_dtype, DatetimeTZDtype):
775
+ # Get tzinfo from Series dtype
776
+ other_tz = other.dtype.tz
777
+ if other is NaT:
778
+ # pd.NaT quacks both aware and naive
779
+ pass
780
+ elif self.tz is None:
781
+ if other_tz is not None:
782
+ raise TypeError(
783
+ "Cannot compare tz-naive and tz-aware datetime-like objects."
784
+ )
785
+ elif other_tz is None:
786
+ raise TypeError(
787
+ "Cannot compare tz-naive and tz-aware datetime-like objects"
788
+ )
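+
+ # --- illustrative sketch (not part of the pandas source) --- the effect of
+ # the compatibility check above, seen from the public API:
+ #
+ #     import pandas as pd
+ #     naive = pd.date_range("2020-01-01", periods=2)
+ #     aware = naive.tz_localize("UTC")
+ #     naive < aware   # raises TypeError: cannot compare tz-naive and tz-aware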
789
+
790
+ # -----------------------------------------------------------------
791
+ # Arithmetic Methods
792
+
793
+ def _add_offset(self, offset: BaseOffset) -> Self:
794
+ assert not isinstance(offset, Tick)
795
+
796
+ if self.tz is not None:
797
+ values = self.tz_localize(None)
798
+ else:
799
+ values = self
800
+
801
+ try:
802
+ res_values = offset._apply_array(values._ndarray)
803
+ if res_values.dtype.kind == "i":
804
+ # error: Argument 1 to "view" of "ndarray" has incompatible type
805
+ # "dtype[datetime64] | DatetimeTZDtype"; expected
806
+ # "dtype[Any] | type[Any] | _SupportsDType[dtype[Any]]"
807
+ res_values = res_values.view(values.dtype) # type: ignore[arg-type]
808
+ except NotImplementedError:
809
+ warnings.warn(
810
+ "Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
811
+ PerformanceWarning,
812
+ stacklevel=find_stack_level(),
813
+ )
814
+ res_values = self.astype("O") + offset
815
+ # TODO(GH#55564): as_unit will be unnecessary
816
+ result = type(self)._from_sequence(res_values).as_unit(self.unit)
817
+ if not len(self):
818
+ # GH#30336 _from_sequence won't be able to infer self.tz
819
+ return result.tz_localize(self.tz)
820
+
821
+ else:
822
+ result = type(self)._simple_new(res_values, dtype=res_values.dtype)
823
+ if offset.normalize:
824
+ result = result.normalize()
825
+ result._freq = None
826
+
827
+ if self.tz is not None:
828
+ result = result.tz_localize(self.tz)
829
+
830
+ return result
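+
+ # --- illustrative sketch (not part of the pandas source; assumes a standard
+ # pandas install) --- adding a non-Tick DateOffset is what funnels into
+ # _add_offset:
+ #
+ #     import pandas as pd
+ #     dti = pd.date_range("2020-01-31", periods=2, freq="ME", tz="UTC")
+ #     dti + pd.offsets.MonthEnd(1)   # vectorized via offset._apply_array
+ #
+ # Offsets without a vectorized _apply_array fall back to the object-dtype
+ # path above and emit a PerformanceWarning.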
831
+
832
+ # -----------------------------------------------------------------
833
+ # Timezone Conversion and Localization Methods
834
+
835
+ def _local_timestamps(self) -> npt.NDArray[np.int64]:
836
+ """
837
+ Convert to an i8 (unix-like nanosecond timestamp) representation
838
+ while keeping the local timezone and not using UTC.
839
+ This is used to calculate time-of-day information as if the timestamps
840
+ were timezone-naive.
841
+ """
842
+ if self.tz is None or timezones.is_utc(self.tz):
843
+ # Avoid the copy that would be made in tzconversion
844
+ return self.asi8
845
+ return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
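+
+ # --- illustrative sketch (not part of the pandas source) --- the field
+ # accessors below (hour, time, date, month_name, ...) go through
+ # _local_timestamps, which is why they report local wall-clock values for
+ # tz-aware data:
+ #
+ #     import pandas as pd
+ #     dti = pd.date_range("2020-01-01 23:30", periods=1, tz="US/Eastern")
+ #     dti.hour   # Index([23]) -- the wall clock, not the 04:30 UTC instant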
846
+
847
+ def tz_convert(self, tz) -> Self:
848
+ """
849
+ Convert tz-aware Datetime Array/Index from one time zone to another.
850
+
851
+ Parameters
852
+ ----------
853
+ tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
854
+ Time zone for time. Corresponding timestamps would be converted
855
+ to this time zone of the Datetime Array/Index. A `tz` of None will
856
+ convert to UTC and remove the timezone information.
857
+
858
+ Returns
859
+ -------
860
+ Array or Index
861
+
862
+ Raises
863
+ ------
864
+ TypeError
865
+ If Datetime Array/Index is tz-naive.
866
+
867
+ See Also
868
+ --------
869
+ DatetimeIndex.tz : A timezone that has a variable offset from UTC.
870
+ DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
871
+ given time zone, or remove timezone from a tz-aware DatetimeIndex.
872
+
873
+ Examples
874
+ --------
875
+ With the `tz` parameter, we can change the DatetimeIndex
876
+ to other time zones:
877
+
878
+ >>> dti = pd.date_range(start='2014-08-01 09:00',
879
+ ... freq='h', periods=3, tz='Europe/Berlin')
880
+
881
+ >>> dti
882
+ DatetimeIndex(['2014-08-01 09:00:00+02:00',
883
+ '2014-08-01 10:00:00+02:00',
884
+ '2014-08-01 11:00:00+02:00'],
885
+ dtype='datetime64[ns, Europe/Berlin]', freq='h')
886
+
887
+ >>> dti.tz_convert('US/Central')
888
+ DatetimeIndex(['2014-08-01 02:00:00-05:00',
889
+ '2014-08-01 03:00:00-05:00',
890
+ '2014-08-01 04:00:00-05:00'],
891
+ dtype='datetime64[ns, US/Central]', freq='h')
892
+
893
+ With the ``tz=None``, we can remove the timezone (after converting
894
+ to UTC if necessary):
895
+
896
+ >>> dti = pd.date_range(start='2014-08-01 09:00', freq='h',
897
+ ... periods=3, tz='Europe/Berlin')
898
+
899
+ >>> dti
900
+ DatetimeIndex(['2014-08-01 09:00:00+02:00',
901
+ '2014-08-01 10:00:00+02:00',
902
+ '2014-08-01 11:00:00+02:00'],
903
+ dtype='datetime64[ns, Europe/Berlin]', freq='h')
904
+
905
+ >>> dti.tz_convert(None)
906
+ DatetimeIndex(['2014-08-01 07:00:00',
907
+ '2014-08-01 08:00:00',
908
+ '2014-08-01 09:00:00'],
909
+ dtype='datetime64[ns]', freq='h')
910
+ """
911
+ tz = timezones.maybe_get_tz(tz)
912
+
913
+ if self.tz is None:
914
+ # tz naive, use tz_localize
915
+ raise TypeError(
916
+ "Cannot convert tz-naive timestamps, use tz_localize to localize"
917
+ )
918
+
919
+ # No conversion since timestamps are all UTC to begin with
920
+ dtype = tz_to_dtype(tz, unit=self.unit)
921
+ return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
922
+
923
+ @dtl.ravel_compat
924
+ def tz_localize(
925
+ self,
926
+ tz,
927
+ ambiguous: TimeAmbiguous = "raise",
928
+ nonexistent: TimeNonexistent = "raise",
929
+ ) -> Self:
930
+ """
931
+ Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index.
932
+
933
+ This method takes a time zone (tz) naive Datetime Array/Index object
934
+ and makes this time zone aware. It does not move the time to another
935
+ time zone.
936
+
937
+ This method can also be used to do the inverse -- to create a time
938
+ zone unaware object from an aware object. To that end, pass `tz=None`.
939
+
940
+ Parameters
941
+ ----------
942
+ tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
943
+ Time zone to convert timestamps to. Passing ``None`` will
944
+ remove the time zone information preserving local time.
945
+ ambiguous : 'infer', 'NaT', bool array, default 'raise'
946
+ When clocks moved backward due to DST, ambiguous times may arise.
947
+ For example in Central European Time (UTC+01), when going from
948
+ 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
949
+ 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
950
+ `ambiguous` parameter dictates how ambiguous times should be
951
+ handled.
952
+
953
+ - 'infer' will attempt to infer fall dst-transition hours based on
954
+ order
955
+ - bool-ndarray where True signifies a DST time, False signifies a
956
+ non-DST time (note that this flag is only applicable for
957
+ ambiguous times)
958
+ - 'NaT' will return NaT where there are ambiguous times
959
+ - 'raise' will raise an AmbiguousTimeError if there are ambiguous
960
+ times.
961
+
962
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
963
+ default 'raise'
964
+ A nonexistent time does not exist in a particular timezone
965
+ where clocks moved forward due to DST.
966
+
967
+ - 'shift_forward' will shift the nonexistent time forward to the
968
+ closest existing time
969
+ - 'shift_backward' will shift the nonexistent time backward to the
970
+ closest existing time
971
+ - 'NaT' will return NaT where there are nonexistent times
972
+ - timedelta objects will shift nonexistent times by the timedelta
973
+ - 'raise' will raise a NonExistentTimeError if there are
974
+ nonexistent times.
975
+
976
+ Returns
977
+ -------
978
+ Same type as self
979
+ Array/Index converted to the specified time zone.
980
+
981
+ Raises
982
+ ------
983
+ TypeError
984
+ If the Datetime Array/Index is tz-aware and tz is not None.
985
+
986
+ See Also
987
+ --------
988
+ DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
989
+ one time zone to another.
990
+
991
+ Examples
992
+ --------
993
+ >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
994
+ >>> tz_naive
995
+ DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
996
+ '2018-03-03 09:00:00'],
997
+ dtype='datetime64[ns]', freq='D')
998
+
999
+ Localize DatetimeIndex in US/Eastern time zone:
1000
+
1001
+ >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
1002
+ >>> tz_aware
1003
+ DatetimeIndex(['2018-03-01 09:00:00-05:00',
1004
+ '2018-03-02 09:00:00-05:00',
1005
+ '2018-03-03 09:00:00-05:00'],
1006
+ dtype='datetime64[ns, US/Eastern]', freq=None)
1007
+
1008
+ With the ``tz=None``, we can remove the time zone information
1009
+ while keeping the local time (not converted to UTC):
1010
+
1011
+ >>> tz_aware.tz_localize(None)
1012
+ DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
1013
+ '2018-03-03 09:00:00'],
1014
+ dtype='datetime64[ns]', freq=None)
1015
+
1016
+ Be careful with DST changes. When there is sequential data, pandas can
1017
+ infer the DST time:
1018
+
1019
+ >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
1020
+ ... '2018-10-28 02:00:00',
1021
+ ... '2018-10-28 02:30:00',
1022
+ ... '2018-10-28 02:00:00',
1023
+ ... '2018-10-28 02:30:00',
1024
+ ... '2018-10-28 03:00:00',
1025
+ ... '2018-10-28 03:30:00']))
1026
+ >>> s.dt.tz_localize('CET', ambiguous='infer')
1027
+ 0 2018-10-28 01:30:00+02:00
1028
+ 1 2018-10-28 02:00:00+02:00
1029
+ 2 2018-10-28 02:30:00+02:00
1030
+ 3 2018-10-28 02:00:00+01:00
1031
+ 4 2018-10-28 02:30:00+01:00
1032
+ 5 2018-10-28 03:00:00+01:00
1033
+ 6 2018-10-28 03:30:00+01:00
1034
+ dtype: datetime64[ns, CET]
1035
+
1036
+ In some cases, inferring the DST is impossible. In such cases, you can
1037
+ pass an ndarray to the ambiguous parameter to set the DST explicitly
1038
+
1039
+ >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
1040
+ ... '2018-10-28 02:36:00',
1041
+ ... '2018-10-28 03:46:00']))
1042
+ >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
1043
+ 0 2018-10-28 01:20:00+02:00
1044
+ 1 2018-10-28 02:36:00+02:00
1045
+ 2 2018-10-28 03:46:00+01:00
1046
+ dtype: datetime64[ns, CET]
1047
+
1048
+ If the DST transition causes nonexistent times, you can shift these
1049
+ dates forward or backwards with a timedelta object or `'shift_forward'`
1050
+ or `'shift_backward'`.
1051
+
1052
+ >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
1053
+ ... '2015-03-29 03:30:00']))
1054
+ >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
1055
+ 0 2015-03-29 03:00:00+02:00
1056
+ 1 2015-03-29 03:30:00+02:00
1057
+ dtype: datetime64[ns, Europe/Warsaw]
1058
+
1059
+ >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
1060
+ 0 2015-03-29 01:59:59.999999999+01:00
1061
+ 1 2015-03-29 03:30:00+02:00
1062
+ dtype: datetime64[ns, Europe/Warsaw]
1063
+
1064
+ >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1h'))
1065
+ 0 2015-03-29 03:30:00+02:00
1066
+ 1 2015-03-29 03:30:00+02:00
1067
+ dtype: datetime64[ns, Europe/Warsaw]
1068
+ """
1069
+ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
1070
+ if nonexistent not in nonexistent_options and not isinstance(
1071
+ nonexistent, timedelta
1072
+ ):
1073
+ raise ValueError(
1074
+ "The nonexistent argument must be one of 'raise', "
1075
+ "'NaT', 'shift_forward', 'shift_backward' or "
1076
+ "a timedelta object"
1077
+ )
1078
+
1079
+ if self.tz is not None:
1080
+ if tz is None:
1081
+ new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
1082
+ else:
1083
+ raise TypeError("Already tz-aware, use tz_convert to convert.")
1084
+ else:
1085
+ tz = timezones.maybe_get_tz(tz)
1086
+ # Convert to UTC
1087
+
1088
+ new_dates = tzconversion.tz_localize_to_utc(
1089
+ self.asi8,
1090
+ tz,
1091
+ ambiguous=ambiguous,
1092
+ nonexistent=nonexistent,
1093
+ creso=self._creso,
1094
+ )
1095
+ new_dates_dt64 = new_dates.view(f"M8[{self.unit}]")
1096
+ dtype = tz_to_dtype(tz, unit=self.unit)
1097
+
1098
+ freq = None
1099
+ if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates_dt64[0])):
1100
+ # we can preserve freq
1101
+ # TODO: Also for fixed-offsets
1102
+ freq = self.freq
1103
+ elif tz is None and self.tz is None:
1104
+ # no-op
1105
+ freq = self.freq
1106
+ return self._simple_new(new_dates_dt64, dtype=dtype, freq=freq)
1107
+
1108
+ # ----------------------------------------------------------------
1109
+ # Conversion Methods - Vectorized analogues of Timestamp methods
1110
+
1111
+ def to_pydatetime(self) -> npt.NDArray[np.object_]:
1112
+ """
1113
+ Return an ndarray of ``datetime.datetime`` objects.
1114
+
1115
+ Returns
1116
+ -------
1117
+ numpy.ndarray
1118
+
1119
+ Examples
1120
+ --------
1121
+ >>> idx = pd.date_range('2018-02-27', periods=3)
1122
+ >>> idx.to_pydatetime()
1123
+ array([datetime.datetime(2018, 2, 27, 0, 0),
1124
+ datetime.datetime(2018, 2, 28, 0, 0),
1125
+ datetime.datetime(2018, 3, 1, 0, 0)], dtype=object)
1126
+ """
1127
+ return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)
1128
+
1129
+ def normalize(self) -> Self:
1130
+ """
1131
+ Convert times to midnight.
1132
+
1133
+ The time component of the date-time is converted to midnight, i.e.
1134
+ 00:00:00. This is useful in cases when the time does not matter.
1135
+ Length is unaltered. The timezones are unaffected.
1136
+
1137
+ This method is available on Series with datetime values under
1138
+ the ``.dt`` accessor, and directly on Datetime Array/Index.
1139
+
1140
+ Returns
1141
+ -------
1142
+ DatetimeArray, DatetimeIndex or Series
1143
+ The same type as the original data. Series will have the same
1144
+ name and index. DatetimeIndex will have the same name.
1145
+
1146
+ See Also
1147
+ --------
1148
+ floor : Floor the datetimes to the specified freq.
1149
+ ceil : Ceil the datetimes to the specified freq.
1150
+ round : Round the datetimes to the specified freq.
1151
+
1152
+ Examples
1153
+ --------
1154
+ >>> idx = pd.date_range(start='2014-08-01 10:00', freq='h',
1155
+ ... periods=3, tz='Asia/Calcutta')
1156
+ >>> idx
1157
+ DatetimeIndex(['2014-08-01 10:00:00+05:30',
1158
+ '2014-08-01 11:00:00+05:30',
1159
+ '2014-08-01 12:00:00+05:30'],
1160
+ dtype='datetime64[ns, Asia/Calcutta]', freq='h')
1161
+ >>> idx.normalize()
1162
+ DatetimeIndex(['2014-08-01 00:00:00+05:30',
1163
+ '2014-08-01 00:00:00+05:30',
1164
+ '2014-08-01 00:00:00+05:30'],
1165
+ dtype='datetime64[ns, Asia/Calcutta]', freq=None)
1166
+ """
1167
+ new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso)
1168
+ dt64_values = new_values.view(self._ndarray.dtype)
1169
+
1170
+ dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype)
1171
+ dta = dta._with_freq("infer")
1172
+ if self.tz is not None:
1173
+ dta = dta.tz_localize(self.tz)
1174
+ return dta
1175
+
1176
+ def to_period(self, freq=None) -> PeriodArray:
1177
+ """
1178
+ Cast to PeriodArray/PeriodIndex at a particular frequency.
1179
+
1180
+ Converts DatetimeArray/Index to PeriodArray/PeriodIndex.
1181
+
1182
+ Parameters
1183
+ ----------
1184
+ freq : str or Period, optional
1185
+ One of pandas' :ref:`period aliases <timeseries.period_aliases>`
1186
+ or a Period object. Will be inferred by default.
1187
+
1188
+ Returns
1189
+ -------
1190
+ PeriodArray/PeriodIndex
1191
+
1192
+ Raises
1193
+ ------
1194
+ ValueError
1195
+ When converting a DatetimeArray/Index with non-regular values,
1196
+ so that a frequency cannot be inferred.
1197
+
1198
+ See Also
1199
+ --------
1200
+ PeriodIndex: Immutable ndarray holding ordinal values.
1201
+ DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
1202
+
1203
+ Examples
1204
+ --------
1205
+ >>> df = pd.DataFrame({"y": [1, 2, 3]},
1206
+ ... index=pd.to_datetime(["2000-03-31 00:00:00",
1207
+ ... "2000-05-31 00:00:00",
1208
+ ... "2000-08-31 00:00:00"]))
1209
+ >>> df.index.to_period("M")
1210
+ PeriodIndex(['2000-03', '2000-05', '2000-08'],
1211
+ dtype='period[M]')
1212
+
1213
+ Infer the daily frequency
1214
+
1215
+ >>> idx = pd.date_range("2017-01-01", periods=2)
1216
+ >>> idx.to_period()
1217
+ PeriodIndex(['2017-01-01', '2017-01-02'],
1218
+ dtype='period[D]')
1219
+ """
1220
+ from pandas.core.arrays import PeriodArray
1221
+
1222
+ if self.tz is not None:
1223
+ warnings.warn(
1224
+ "Converting to PeriodArray/Index representation "
1225
+ "will drop timezone information.",
1226
+ UserWarning,
1227
+ stacklevel=find_stack_level(),
1228
+ )
1229
+
1230
+ if freq is None:
1231
+ freq = self.freqstr or self.inferred_freq
1232
+ if isinstance(self.freq, BaseOffset) and hasattr(
1233
+ self.freq, "_period_dtype_code"
1234
+ ):
1235
+ freq = PeriodDtype(self.freq)._freqstr
1236
+
1237
+ if freq is None:
1238
+ raise ValueError(
1239
+ "You must pass a freq argument as current index has none."
1240
+ )
1241
+
1242
+ res = get_period_alias(freq)
1243
+
1244
+ # https://github.com/pandas-dev/pandas/issues/33358
1245
+ if res is None:
1246
+ res = freq
1247
+
1248
+ freq = res
1249
+ return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
1250
+
1251
+ # -----------------------------------------------------------------
1252
+ # Properties - Vectorized Timestamp Properties/Methods
1253
+
1254
+ def month_name(self, locale=None) -> npt.NDArray[np.object_]:
1255
+ """
1256
+ Return the month names with specified locale.
1257
+
1258
+ Parameters
1259
+ ----------
1260
+ locale : str, optional
1261
+ Locale determining the language in which to return the month name.
1262
+ Default is English locale (``'en_US.utf8'``). Use the command
1263
+ ``locale -a`` on your terminal on Unix systems to find your locale
1264
+ language code.
1265
+
1266
+ Returns
1267
+ -------
1268
+ Series or Index
1269
+ Series or Index of month names.
1270
+
1271
+ Examples
1272
+ --------
1273
+ >>> s = pd.Series(pd.date_range(start='2018-01', freq='ME', periods=3))
1274
+ >>> s
1275
+ 0 2018-01-31
1276
+ 1 2018-02-28
1277
+ 2 2018-03-31
1278
+ dtype: datetime64[ns]
1279
+ >>> s.dt.month_name()
1280
+ 0 January
1281
+ 1 February
1282
+ 2 March
1283
+ dtype: object
1284
+
1285
+ >>> idx = pd.date_range(start='2018-01', freq='ME', periods=3)
1286
+ >>> idx
1287
+ DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
1288
+ dtype='datetime64[ns]', freq='ME')
1289
+ >>> idx.month_name()
1290
+ Index(['January', 'February', 'March'], dtype='object')
1291
+
1292
+ Using the ``locale`` parameter you can set a different locale language,
1293
+ for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month
1294
+ names in Brazilian Portuguese language.
1295
+
1296
+ >>> idx = pd.date_range(start='2018-01', freq='ME', periods=3)
1297
+ >>> idx
1298
+ DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
1299
+ dtype='datetime64[ns]', freq='ME')
1300
+ >>> idx.month_name(locale='pt_BR.utf8') # doctest: +SKIP
1301
+ Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object')
1302
+ """
1303
+ values = self._local_timestamps()
1304
+
1305
+ result = fields.get_date_name_field(
1306
+ values, "month_name", locale=locale, reso=self._creso
1307
+ )
1308
+ result = self._maybe_mask_results(result, fill_value=None)
1309
+ return result
1310
+
1311
+ def day_name(self, locale=None) -> npt.NDArray[np.object_]:
1312
+ """
1313
+ Return the day names with specified locale.
1314
+
1315
+ Parameters
1316
+ ----------
1317
+ locale : str, optional
1318
+ Locale determining the language in which to return the day name.
1319
+ Default is English locale (``'en_US.utf8'``). Use the command
1320
+ ``locale -a`` on your terminal on Unix systems to find your locale
1321
+ language code.
1322
+
1323
+ Returns
1324
+ -------
1325
+ Series or Index
1326
+ Series or Index of day names.
1327
+
1328
+ Examples
1329
+ --------
1330
+ >>> s = pd.Series(pd.date_range(start='2018-01-01', freq='D', periods=3))
1331
+ >>> s
1332
+ 0 2018-01-01
1333
+ 1 2018-01-02
1334
+ 2 2018-01-03
1335
+ dtype: datetime64[ns]
1336
+ >>> s.dt.day_name()
1337
+ 0 Monday
1338
+ 1 Tuesday
1339
+ 2 Wednesday
1340
+ dtype: object
1341
+
1342
+ >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
1343
+ >>> idx
1344
+ DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
1345
+ dtype='datetime64[ns]', freq='D')
1346
+ >>> idx.day_name()
1347
+ Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
1348
+
1349
+ Using the ``locale`` parameter you can set a different locale language,
1350
+ for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day
1351
+ names in Brazilian Portuguese language.
1352
+
1353
+ >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
1354
+ >>> idx
1355
+ DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
1356
+ dtype='datetime64[ns]', freq='D')
1357
+ >>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP
1358
+ Index(['Segunda', 'Terça', 'Quarta'], dtype='object')
1359
+ """
1360
+ values = self._local_timestamps()
1361
+
1362
+ result = fields.get_date_name_field(
1363
+ values, "day_name", locale=locale, reso=self._creso
1364
+ )
1365
+ result = self._maybe_mask_results(result, fill_value=None)
1366
+ return result
1367
+
1368
+ @property
1369
+ def time(self) -> npt.NDArray[np.object_]:
1370
+ """
1371
+ Returns numpy array of :class:`datetime.time` objects.
1372
+
1373
+ The time part of the Timestamps.
1374
+
1375
+ Examples
1376
+ --------
1377
+ For Series:
1378
+
1379
+ >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
1380
+ >>> s = pd.to_datetime(s)
1381
+ >>> s
1382
+ 0 2020-01-01 10:00:00+00:00
1383
+ 1 2020-02-01 11:00:00+00:00
1384
+ dtype: datetime64[ns, UTC]
1385
+ >>> s.dt.time
1386
+ 0 10:00:00
1387
+ 1 11:00:00
1388
+ dtype: object
1389
+
1390
+ For DatetimeIndex:
1391
+
1392
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
1393
+ ... "2/1/2020 11:00:00+00:00"])
1394
+ >>> idx.time
1395
+ array([datetime.time(10, 0), datetime.time(11, 0)], dtype=object)
1396
+ """
1397
+ # If the Timestamps have a timezone that is not UTC,
1398
+ # convert them into their i8 representation while
1399
+ # keeping their timezone and not using UTC
1400
+ timestamps = self._local_timestamps()
1401
+
1402
+ return ints_to_pydatetime(timestamps, box="time", reso=self._creso)
1403
+
1404
+ @property
1405
+ def timetz(self) -> npt.NDArray[np.object_]:
1406
+ """
1407
+ Returns numpy array of :class:`datetime.time` objects with timezones.
1408
+
1409
+ The time part of the Timestamps.
1410
+
1411
+ Examples
1412
+ --------
1413
+ For Series:
1414
+
1415
+ >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
1416
+ >>> s = pd.to_datetime(s)
1417
+ >>> s
1418
+ 0 2020-01-01 10:00:00+00:00
1419
+ 1 2020-02-01 11:00:00+00:00
1420
+ dtype: datetime64[ns, UTC]
1421
+ >>> s.dt.timetz
1422
+ 0 10:00:00+00:00
1423
+ 1 11:00:00+00:00
1424
+ dtype: object
1425
+
1426
+ For DatetimeIndex:
1427
+
1428
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
1429
+ ... "2/1/2020 11:00:00+00:00"])
1430
+ >>> idx.timetz
1431
+ array([datetime.time(10, 0, tzinfo=datetime.timezone.utc),
1432
+ datetime.time(11, 0, tzinfo=datetime.timezone.utc)], dtype=object)
1433
+ """
1434
+ return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso)
1435
+
1436
+ @property
1437
+ def date(self) -> npt.NDArray[np.object_]:
1438
+ """
1439
+ Returns numpy array of python :class:`datetime.date` objects.
1440
+
1441
+ Namely, the date part of Timestamps without time and
1442
+ timezone information.
1443
+
1444
+ Examples
1445
+ --------
1446
+ For Series:
1447
+
1448
+ >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
1449
+ >>> s = pd.to_datetime(s)
1450
+ >>> s
1451
+ 0 2020-01-01 10:00:00+00:00
1452
+ 1 2020-02-01 11:00:00+00:00
1453
+ dtype: datetime64[ns, UTC]
1454
+ >>> s.dt.date
1455
+ 0 2020-01-01
1456
+ 1 2020-02-01
1457
+ dtype: object
1458
+
1459
+ For DatetimeIndex:
1460
+
1461
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
1462
+ ... "2/1/2020 11:00:00+00:00"])
1463
+ >>> idx.date
1464
+ array([datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)], dtype=object)
1465
+ """
1466
+ # If the Timestamps have a timezone that is not UTC,
1467
+ # convert them into their i8 representation while
1468
+ # keeping their timezone and not using UTC
1469
+ timestamps = self._local_timestamps()
1470
+
1471
+ return ints_to_pydatetime(timestamps, box="date", reso=self._creso)
1472
+
1473
+ def isocalendar(self) -> DataFrame:
1474
+ """
1475
+ Calculate year, week, and day according to the ISO 8601 standard.
1476
+
1477
+ Returns
1478
+ -------
1479
+ DataFrame
1480
+ With columns year, week and day.
1481
+
1482
+ See Also
1483
+ --------
1484
+ Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
1485
+ week number, and weekday for the given Timestamp object.
1486
+ datetime.date.isocalendar : Return a named tuple object with
1487
+ three components: year, week and weekday.
1488
+
1489
+ Examples
1490
+ --------
1491
+ >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
1492
+ >>> idx.isocalendar()
1493
+ year week day
1494
+ 2019-12-29 2019 52 7
1495
+ 2019-12-30 2020 1 1
1496
+ 2019-12-31 2020 1 2
1497
+ 2020-01-01 2020 1 3
1498
+ >>> idx.isocalendar().week
1499
+ 2019-12-29 52
1500
+ 2019-12-30 1
1501
+ 2019-12-31 1
1502
+ 2020-01-01 1
1503
+ Freq: D, Name: week, dtype: UInt32
1504
+ """
1505
+ from pandas import DataFrame
1506
+
1507
+ values = self._local_timestamps()
1508
+ sarray = fields.build_isocalendar_sarray(values, reso=self._creso)
1509
+ iso_calendar_df = DataFrame(
1510
+ sarray, columns=["year", "week", "day"], dtype="UInt32"
1511
+ )
1512
+ if self._hasna:
1513
+ iso_calendar_df.iloc[self._isnan] = None
1514
+ return iso_calendar_df
1515
+
1516
+ year = _field_accessor(
1517
+ "year",
1518
+ "Y",
1519
+ """
1520
+ The year of the datetime.
1521
+
1522
+ Examples
1523
+ --------
1524
+ >>> datetime_series = pd.Series(
1525
+ ... pd.date_range("2000-01-01", periods=3, freq="YE")
1526
+ ... )
1527
+ >>> datetime_series
1528
+ 0 2000-12-31
1529
+ 1 2001-12-31
1530
+ 2 2002-12-31
1531
+ dtype: datetime64[ns]
1532
+ >>> datetime_series.dt.year
1533
+ 0 2000
1534
+ 1 2001
1535
+ 2 2002
1536
+ dtype: int32
1537
+ """,
1538
+ )
1539
+ month = _field_accessor(
1540
+ "month",
1541
+ "M",
1542
+ """
1543
+ The month as January=1, December=12.
1544
+
1545
+ Examples
1546
+ --------
1547
+ >>> datetime_series = pd.Series(
1548
+ ... pd.date_range("2000-01-01", periods=3, freq="ME")
1549
+ ... )
1550
+ >>> datetime_series
1551
+ 0 2000-01-31
1552
+ 1 2000-02-29
1553
+ 2 2000-03-31
1554
+ dtype: datetime64[ns]
1555
+ >>> datetime_series.dt.month
1556
+ 0 1
1557
+ 1 2
1558
+ 2 3
1559
+ dtype: int32
1560
+ """,
1561
+ )
1562
+ day = _field_accessor(
1563
+ "day",
1564
+ "D",
1565
+ """
1566
+ The day of the datetime.
1567
+
1568
+ Examples
1569
+ --------
1570
+ >>> datetime_series = pd.Series(
1571
+ ... pd.date_range("2000-01-01", periods=3, freq="D")
1572
+ ... )
1573
+ >>> datetime_series
1574
+ 0 2000-01-01
1575
+ 1 2000-01-02
1576
+ 2 2000-01-03
1577
+ dtype: datetime64[ns]
1578
+ >>> datetime_series.dt.day
1579
+ 0 1
1580
+ 1 2
1581
+ 2 3
1582
+ dtype: int32
1583
+ """,
1584
+ )
1585
+ hour = _field_accessor(
1586
+ "hour",
1587
+ "h",
1588
+ """
1589
+ The hours of the datetime.
1590
+
1591
+ Examples
1592
+ --------
1593
+ >>> datetime_series = pd.Series(
1594
+ ... pd.date_range("2000-01-01", periods=3, freq="h")
1595
+ ... )
1596
+ >>> datetime_series
1597
+ 0 2000-01-01 00:00:00
1598
+ 1 2000-01-01 01:00:00
1599
+ 2 2000-01-01 02:00:00
1600
+ dtype: datetime64[ns]
1601
+ >>> datetime_series.dt.hour
1602
+ 0 0
1603
+ 1 1
1604
+ 2 2
1605
+ dtype: int32
1606
+ """,
1607
+ )
1608
+ minute = _field_accessor(
1609
+ "minute",
1610
+ "m",
1611
+ """
1612
+ The minutes of the datetime.
1613
+
1614
+ Examples
1615
+ --------
1616
+ >>> datetime_series = pd.Series(
1617
+ ... pd.date_range("2000-01-01", periods=3, freq="min")
1618
+ ... )
1619
+ >>> datetime_series
1620
+ 0 2000-01-01 00:00:00
1621
+ 1 2000-01-01 00:01:00
1622
+ 2 2000-01-01 00:02:00
1623
+ dtype: datetime64[ns]
1624
+ >>> datetime_series.dt.minute
1625
+ 0 0
1626
+ 1 1
1627
+ 2 2
1628
+ dtype: int32
1629
+ """,
1630
+ )
1631
+ second = _field_accessor(
1632
+ "second",
1633
+ "s",
1634
+ """
1635
+ The seconds of the datetime.
1636
+
1637
+ Examples
1638
+ --------
1639
+ >>> datetime_series = pd.Series(
1640
+ ... pd.date_range("2000-01-01", periods=3, freq="s")
1641
+ ... )
1642
+ >>> datetime_series
1643
+ 0 2000-01-01 00:00:00
1644
+ 1 2000-01-01 00:00:01
1645
+ 2 2000-01-01 00:00:02
1646
+ dtype: datetime64[ns]
1647
+ >>> datetime_series.dt.second
1648
+ 0 0
1649
+ 1 1
1650
+ 2 2
1651
+ dtype: int32
1652
+ """,
1653
+ )
1654
+ microsecond = _field_accessor(
1655
+ "microsecond",
1656
+ "us",
1657
+ """
1658
+ The microseconds of the datetime.
1659
+
1660
+ Examples
1661
+ --------
1662
+ >>> datetime_series = pd.Series(
1663
+ ... pd.date_range("2000-01-01", periods=3, freq="us")
1664
+ ... )
1665
+ >>> datetime_series
1666
+ 0 2000-01-01 00:00:00.000000
1667
+ 1 2000-01-01 00:00:00.000001
1668
+ 2 2000-01-01 00:00:00.000002
1669
+ dtype: datetime64[ns]
1670
+ >>> datetime_series.dt.microsecond
1671
+ 0 0
1672
+ 1 1
1673
+ 2 2
1674
+ dtype: int32
1675
+ """,
1676
+ )
1677
+ nanosecond = _field_accessor(
1678
+ "nanosecond",
1679
+ "ns",
1680
+ """
1681
+ The nanoseconds of the datetime.
1682
+
1683
+ Examples
1684
+ --------
1685
+ >>> datetime_series = pd.Series(
1686
+ ... pd.date_range("2000-01-01", periods=3, freq="ns")
1687
+ ... )
1688
+ >>> datetime_series
1689
+ 0 2000-01-01 00:00:00.000000000
1690
+ 1 2000-01-01 00:00:00.000000001
1691
+ 2 2000-01-01 00:00:00.000000002
1692
+ dtype: datetime64[ns]
1693
+ >>> datetime_series.dt.nanosecond
1694
+ 0 0
1695
+ 1 1
1696
+ 2 2
1697
+ dtype: int32
1698
+ """,
1699
+ )
1700
+ _dayofweek_doc = """
1701
+ The day of the week with Monday=0, Sunday=6.
1702
+
1703
+ Return the day of the week. It is assumed the week starts on
1704
+ Monday, which is denoted by 0 and ends on Sunday which is denoted
1705
+ by 6. This method is available on both Series with datetime
1706
+ values (using the `dt` accessor) and DatetimeIndex.
1707
+
1708
+ Returns
1709
+ -------
1710
+ Series or Index
1711
+ Containing integers indicating the day number.
1712
+
1713
+ See Also
1714
+ --------
1715
+ Series.dt.dayofweek : Alias.
1716
+ Series.dt.weekday : Alias.
1717
+ Series.dt.day_name : Returns the name of the day of the week.
1718
+
1719
+ Examples
1720
+ --------
1721
+ >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
1722
+ >>> s.dt.dayofweek
1723
+ 2016-12-31 5
1724
+ 2017-01-01 6
1725
+ 2017-01-02 0
1726
+ 2017-01-03 1
1727
+ 2017-01-04 2
1728
+ 2017-01-05 3
1729
+ 2017-01-06 4
1730
+ 2017-01-07 5
1731
+ 2017-01-08 6
1732
+ Freq: D, dtype: int32
1733
+ """
1734
+ day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
1735
+ dayofweek = day_of_week
1736
+ weekday = day_of_week
1737
+
1738
+ day_of_year = _field_accessor(
1739
+ "dayofyear",
1740
+ "doy",
1741
+ """
1742
+ The ordinal day of the year.
1743
+
1744
+ Examples
1745
+ --------
1746
+ For Series:
1747
+
1748
+ >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
1749
+ >>> s = pd.to_datetime(s)
1750
+ >>> s
1751
+ 0 2020-01-01 10:00:00+00:00
1752
+ 1 2020-02-01 11:00:00+00:00
1753
+ dtype: datetime64[ns, UTC]
1754
+ >>> s.dt.dayofyear
1755
+ 0 1
1756
+ 1 32
1757
+ dtype: int32
1758
+
1759
+ For DatetimeIndex:
1760
+
1761
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
1762
+ ... "2/1/2020 11:00:00+00:00"])
1763
+ >>> idx.dayofyear
1764
+ Index([1, 32], dtype='int32')
1765
+ """,
1766
+ )
1767
+ dayofyear = day_of_year
1768
+ quarter = _field_accessor(
1769
+ "quarter",
1770
+ "q",
1771
+ """
1772
+ The quarter of the date.
1773
+
1774
+ Examples
1775
+ --------
1776
+ For Series:
1777
+
1778
+ >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "4/1/2020 11:00:00+00:00"])
1779
+ >>> s = pd.to_datetime(s)
1780
+ >>> s
1781
+ 0 2020-01-01 10:00:00+00:00
1782
+ 1 2020-04-01 11:00:00+00:00
1783
+ dtype: datetime64[ns, UTC]
1784
+ >>> s.dt.quarter
1785
+ 0 1
1786
+ 1 2
1787
+ dtype: int32
1788
+
1789
+ For DatetimeIndex:
1790
+
1791
+ >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
1792
+ ... "2/1/2020 11:00:00+00:00"])
1793
+ >>> idx.quarter
1794
+ Index([1, 1], dtype='int32')
1795
+ """,
1796
+ )
1797
+ days_in_month = _field_accessor(
1798
+ "days_in_month",
1799
+ "dim",
1800
+ """
1801
+ The number of days in the month.
1802
+
1803
+ Examples
1804
+ --------
1805
+ >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
1806
+ >>> s = pd.to_datetime(s)
1807
+ >>> s
1808
+ 0 2020-01-01 10:00:00+00:00
1809
+ 1 2020-02-01 11:00:00+00:00
1810
+ dtype: datetime64[ns, UTC]
1811
+ >>> s.dt.daysinmonth
1812
+ 0 31
1813
+ 1 29
1814
+ dtype: int32
1815
+ """,
1816
+ )
1817
+ daysinmonth = days_in_month
1818
+ _is_month_doc = """
1819
+ Indicates whether the date is the {first_or_last} day of the month.
1820
+
1821
+ Returns
1822
+ -------
1823
+ Series or array
1824
+ For Series, returns a Series with boolean values.
1825
+ For DatetimeIndex, returns a boolean array.
1826
+
1827
+ See Also
1828
+ --------
1829
+ is_month_start : Return a boolean indicating whether the date
1830
+ is the first day of the month.
1831
+ is_month_end : Return a boolean indicating whether the date
1832
+ is the last day of the month.
1833
+
1834
+ Examples
1835
+ --------
1836
+ This method is available on Series with datetime values under
1837
+ the ``.dt`` accessor, and directly on DatetimeIndex.
1838
+
1839
+ >>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
1840
+ >>> s
1841
+ 0 2018-02-27
1842
+ 1 2018-02-28
1843
+ 2 2018-03-01
1844
+ dtype: datetime64[ns]
1845
+ >>> s.dt.is_month_start
1846
+ 0 False
1847
+ 1 False
1848
+ 2 True
1849
+ dtype: bool
1850
+ >>> s.dt.is_month_end
1851
+ 0 False
1852
+ 1 True
1853
+ 2 False
1854
+ dtype: bool
1855
+
1856
+ >>> idx = pd.date_range("2018-02-27", periods=3)
1857
+ >>> idx.is_month_start
1858
+ array([False, False, True])
1859
+ >>> idx.is_month_end
1860
+ array([False, True, False])
1861
+ """
1862
+ is_month_start = _field_accessor(
1863
+ "is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
1864
+ )
1865
+
1866
+ is_month_end = _field_accessor(
1867
+ "is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
1868
+ )
1869
+
1870
+ is_quarter_start = _field_accessor(
1871
+ "is_quarter_start",
1872
+ "is_quarter_start",
1873
+ """
1874
+ Indicator for whether the date is the first day of a quarter.
1875
+
1876
+ Returns
1877
+ -------
1878
+ is_quarter_start : Series or DatetimeIndex
1879
+ The same type as the original data with boolean values. Series will
1880
+ have the same name and index. DatetimeIndex will have the same
1881
+ name.
1882
+
1883
+ See Also
1884
+ --------
1885
+ quarter : Return the quarter of the date.
1886
+ is_quarter_end : Similar property for indicating the quarter end.
1887
+
1888
+ Examples
1889
+ --------
1890
+ This method is available on Series with datetime values under
1891
+ the ``.dt`` accessor, and directly on DatetimeIndex.
1892
+
1893
+ >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
1894
+ ... periods=4)})
1895
+ >>> df.assign(quarter=df.dates.dt.quarter,
1896
+ ... is_quarter_start=df.dates.dt.is_quarter_start)
1897
+ dates quarter is_quarter_start
1898
+ 0 2017-03-30 1 False
1899
+ 1 2017-03-31 1 False
1900
+ 2 2017-04-01 2 True
1901
+ 3 2017-04-02 2 False
1902
+
1903
+ >>> idx = pd.date_range('2017-03-30', periods=4)
1904
+ >>> idx
1905
+ DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
1906
+ dtype='datetime64[ns]', freq='D')
1907
+
1908
+ >>> idx.is_quarter_start
1909
+ array([False, False, True, False])
1910
+ """,
1911
+ )
1912
+ is_quarter_end = _field_accessor(
1913
+ "is_quarter_end",
1914
+ "is_quarter_end",
1915
+ """
1916
+ Indicator for whether the date is the last day of a quarter.
1917
+
1918
+ Returns
1919
+ -------
1920
+ is_quarter_end : Series or DatetimeIndex
1921
+ The same type as the original data with boolean values. Series will
1922
+ have the same name and index. DatetimeIndex will have the same
1923
+ name.
1924
+
1925
+ See Also
1926
+ --------
1927
+ quarter : Return the quarter of the date.
1928
+ is_quarter_start : Similar property indicating the quarter start.
1929
+
1930
+ Examples
1931
+ --------
1932
+ This method is available on Series with datetime values under
1933
+ the ``.dt`` accessor, and directly on DatetimeIndex.
1934
+
1935
+ >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
1936
+ ... periods=4)})
1937
+ >>> df.assign(quarter=df.dates.dt.quarter,
1938
+ ... is_quarter_end=df.dates.dt.is_quarter_end)
1939
+ dates quarter is_quarter_end
1940
+ 0 2017-03-30 1 False
1941
+ 1 2017-03-31 1 True
1942
+ 2 2017-04-01 2 False
1943
+ 3 2017-04-02 2 False
1944
+
1945
+ >>> idx = pd.date_range('2017-03-30', periods=4)
1946
+ >>> idx
1947
+ DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
1948
+ dtype='datetime64[ns]', freq='D')
1949
+
1950
+ >>> idx.is_quarter_end
1951
+ array([False, True, False, False])
1952
+ """,
1953
+ )
1954
+ is_year_start = _field_accessor(
1955
+ "is_year_start",
1956
+ "is_year_start",
1957
+ """
1958
+ Indicate whether the date is the first day of a year.
1959
+
1960
+ Returns
1961
+ -------
1962
+ Series or DatetimeIndex
1963
+ The same type as the original data with boolean values. Series will
1964
+ have the same name and index. DatetimeIndex will have the same
1965
+ name.
1966
+
1967
+ See Also
1968
+ --------
1969
+ is_year_end : Similar property indicating the last day of the year.
1970
+
1971
+ Examples
1972
+ --------
1973
+ This method is available on Series with datetime values under
1974
+ the ``.dt`` accessor, and directly on DatetimeIndex.
1975
+
1976
+ >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
1977
+ >>> dates
1978
+ 0 2017-12-30
1979
+ 1 2017-12-31
1980
+ 2 2018-01-01
1981
+ dtype: datetime64[ns]
1982
+
1983
+ >>> dates.dt.is_year_start
1984
+ 0 False
1985
+ 1 False
1986
+ 2 True
1987
+ dtype: bool
1988
+
1989
+ >>> idx = pd.date_range("2017-12-30", periods=3)
1990
+ >>> idx
1991
+ DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
1992
+ dtype='datetime64[ns]', freq='D')
1993
+
1994
+ >>> idx.is_year_start
1995
+ array([False, False, True])
1996
+ """,
1997
+ )
1998
+ is_year_end = _field_accessor(
1999
+ "is_year_end",
2000
+ "is_year_end",
2001
+ """
2002
+ Indicate whether the date is the last day of the year.
2003
+
2004
+ Returns
2005
+ -------
2006
+ Series or DatetimeIndex
2007
+ The same type as the original data with boolean values. Series will
2008
+ have the same name and index. DatetimeIndex will have the same
2009
+ name.
2010
+
2011
+ See Also
2012
+ --------
2013
+ is_year_start : Similar property indicating the start of the year.
2014
+
2015
+ Examples
2016
+ --------
2017
+ This method is available on Series with datetime values under
2018
+ the ``.dt`` accessor, and directly on DatetimeIndex.
2019
+
2020
+ >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
2021
+ >>> dates
2022
+ 0 2017-12-30
2023
+ 1 2017-12-31
2024
+ 2 2018-01-01
2025
+ dtype: datetime64[ns]
2026
+
2027
+ >>> dates.dt.is_year_end
2028
+ 0 False
2029
+ 1 True
2030
+ 2 False
2031
+ dtype: bool
2032
+
2033
+ >>> idx = pd.date_range("2017-12-30", periods=3)
2034
+ >>> idx
2035
+ DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
2036
+ dtype='datetime64[ns]', freq='D')
2037
+
2038
+ >>> idx.is_year_end
2039
+ array([False, True, False])
2040
+ """,
2041
+ )
2042
+ is_leap_year = _field_accessor(
2043
+ "is_leap_year",
2044
+ "is_leap_year",
2045
+ """
2046
+ Boolean indicator if the date belongs to a leap year.
2047
+
2048
+ A leap year is a year that has 366 days (instead of 365), including
2049
+ the 29th of February as an intercalary day.
2050
+ Leap years are years which are multiples of four with the exception
2051
+ of years divisible by 100 but not by 400.
2052
+
2053
+ Returns
2054
+ -------
2055
+ Series or ndarray
2056
+ Booleans indicating if dates belong to a leap year.
2057
+
2058
+ Examples
2059
+ --------
2060
+ This method is available on Series with datetime values under
2061
+ the ``.dt`` accessor, and directly on DatetimeIndex.
2062
+
2063
+ >>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="YE")
2064
+ >>> idx
2065
+ DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
2066
+ dtype='datetime64[ns]', freq='YE-DEC')
2067
+ >>> idx.is_leap_year
2068
+ array([ True, False, False])
2069
+
2070
+ >>> dates_series = pd.Series(idx)
2071
+ >>> dates_series
2072
+ 0 2012-12-31
2073
+ 1 2013-12-31
2074
+ 2 2014-12-31
2075
+ dtype: datetime64[ns]
2076
+ >>> dates_series.dt.is_leap_year
2077
+ 0 True
2078
+ 1 False
2079
+ 2 False
2080
+ dtype: bool
2081
+ """,
2082
+ )
2083
+
2084
+ def to_julian_date(self) -> npt.NDArray[np.float64]:
2085
+ """
2086
+ Convert Datetime Array to float64 ndarray of Julian Dates.
2087
+ Julian Date 0 corresponds to noon on January 1, 4713 BC.
2088
+ https://en.wikipedia.org/wiki/Julian_day
2089
+ """
2090
+
2091
+ # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
2092
+ year = np.asarray(self.year)
2093
+ month = np.asarray(self.month)
2094
+ day = np.asarray(self.day)
2095
+ testarr = month < 3
2096
+ year[testarr] -= 1
2097
+ month[testarr] += 12
2098
+ return (
2099
+ day
2100
+ + np.fix((153 * month - 457) / 5)
2101
+ + 365 * year
2102
+ + np.floor(year / 4)
2103
+ - np.floor(year / 100)
2104
+ + np.floor(year / 400)
2105
+ + 1_721_118.5
2106
+ + (
2107
+ self.hour
2108
+ + self.minute / 60
2109
+ + self.second / 3600
2110
+ + self.microsecond / 3600 / 10**6
2111
+ + self.nanosecond / 3600 / 10**9
2112
+ )
2113
+ / 24
2114
+ )
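+
+ # --- illustrative sketch (not part of the pandas source) --- a quick sanity
+ # check of the formula above: the J2000.0 epoch (2000-01-01 12:00) is
+ # Julian Date 2451545.0:
+ #
+ #     import pandas as pd
+ #     pd.DatetimeIndex(["2000-01-01 12:00"]).to_julian_date()
+ #     # Index([2451545.0], dtype='float64')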
2115
+
2116
+ # -----------------------------------------------------------------
2117
+ # Reductions
2118
+
2119
+ def std(
2120
+ self,
2121
+ axis=None,
2122
+ dtype=None,
2123
+ out=None,
2124
+ ddof: int = 1,
2125
+ keepdims: bool = False,
2126
+ skipna: bool = True,
2127
+ ):
2128
+ """
2129
+ Return sample standard deviation over requested axis.
2130
+
2131
+ Normalized by `N-1` by default. This can be changed using ``ddof``.
2132
+
2133
+ Parameters
2134
+ ----------
2135
+ axis : int, optional
2136
+ Axis for the function to be applied on. For :class:`pandas.Series`
2137
+ this parameter is unused and defaults to ``None``.
2138
+ ddof : int, default 1
2139
+ Degrees of Freedom. The divisor used in calculations is `N - ddof`,
2140
+ where `N` represents the number of elements.
2141
+ skipna : bool, default True
2142
+ Exclude NA/null values. If an entire row/column is ``NA``, the result
2143
+ will be ``NA``.
2144
+
2145
+ Returns
2146
+ -------
2147
+ Timedelta
2148
+
2149
+ See Also
2150
+ --------
2151
+ numpy.ndarray.std : Returns the standard deviation of the array elements
2152
+ along given axis.
2153
+ Series.std : Return sample standard deviation over requested axis.
2154
+
2155
+ Examples
2156
+ --------
2157
+ For :class:`pandas.DatetimeIndex`:
2158
+
2159
+ >>> idx = pd.date_range('2001-01-01 00:00', periods=3)
2160
+ >>> idx
2161
+ DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
2162
+ dtype='datetime64[ns]', freq='D')
2163
+ >>> idx.std()
2164
+ Timedelta('1 days 00:00:00')
2165
+ """
2166
+ # Because std is translation-invariant, we can get self.std
2167
+ # by calculating (self - Timestamp(0)).std, and we can do it
2168
+ # without creating a copy by using a view on self._ndarray
2169
+ from pandas.core.arrays import TimedeltaArray
2170
+
2171
+ # Find the td64 dtype with the same resolution as our dt64 dtype
2172
+ dtype_str = self._ndarray.dtype.name.replace("datetime64", "timedelta64")
2173
+ dtype = np.dtype(dtype_str)
2174
+
2175
+ tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype)
2176
+
2177
+ return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna)
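+
+ # --- illustrative sketch (not part of the pandas source) --- the
+ # translation-invariance trick above in one line: the std of datetimes equals
+ # the std of their offsets from any fixed origin, so viewing the datetime64
+ # payload as timedelta64 gives the same answer without a copy:
+ #
+ #     import pandas as pd
+ #     dti = pd.date_range("2001-01-01", periods=3)
+ #     dti.std()                        # Timedelta('1 days 00:00:00')
+ #     (dti - pd.Timestamp(0)).std()    # identical result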
2178
+
2179
+
2180
+ # -------------------------------------------------------------------
2181
+ # Constructor Helpers
2182
+
2183
+
2184
+ def _sequence_to_dt64(
2185
+ data: ArrayLike,
2186
+ *,
2187
+ copy: bool = False,
2188
+ tz: tzinfo | None = None,
2189
+ dayfirst: bool = False,
2190
+ yearfirst: bool = False,
2191
+ ambiguous: TimeAmbiguous = "raise",
2192
+ out_unit: str | None = None,
2193
+ ):
2194
+ """
2195
+ Parameters
2196
+ ----------
2197
+ data : np.ndarray or ExtensionArray
2198
+ dtl.ensure_arraylike_for_datetimelike has already been called.
2199
+ copy : bool, default False
2200
+ tz : tzinfo or None, default None
2201
+ dayfirst : bool, default False
2202
+ yearfirst : bool, default False
2203
+ ambiguous : str, bool, or arraylike, default 'raise'
2204
+ See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
2205
+ out_unit : str or None, default None
2206
+ Desired output resolution.
2207
+
2208
+ Returns
2209
+ -------
2210
+ result : numpy.ndarray
2211
+ The sequence converted to a numpy array with dtype ``datetime64[unit]``,
2212
+ where ``unit`` is "ns" unless specified otherwise by ``out_unit``.
2213
+ tz : tzinfo or None
2214
+ Either the user-provided tzinfo or one inferred from the data.
2215
+
2216
+ Raises
2217
+ ------
2218
+ TypeError : PeriodDtype data is passed
2219
+ """
2220
+
2221
+ # By this point we are assured to have either a numpy array or Index
2222
+ data, copy = maybe_convert_dtype(data, copy, tz=tz)
2223
+ data_dtype = getattr(data, "dtype", None)
2224
+
2225
+ if out_unit is None:
2226
+ out_unit = "ns"
2227
+ out_dtype = np.dtype(f"M8[{out_unit}]")
2228
+
2229
+ if data_dtype == object or is_string_dtype(data_dtype):
2230
+ # TODO: We do not have tests specific to string-dtypes,
2231
+ # also complex or categorical or other extension
2232
+ data = cast(np.ndarray, data)
2233
+ copy = False
2234
+ if lib.infer_dtype(data, skipna=False) == "integer":
2235
+ # Much more performant than going through array_to_datetime
2236
+ data = data.astype(np.int64)
2237
+ elif tz is not None and ambiguous == "raise":
2238
+ obj_data = np.asarray(data, dtype=object)
2239
+ result = tslib.array_to_datetime_with_tz(
2240
+ obj_data,
2241
+ tz=tz,
2242
+ dayfirst=dayfirst,
2243
+ yearfirst=yearfirst,
2244
+ creso=abbrev_to_npy_unit(out_unit),
2245
+ )
2246
+ return result, tz
2247
+ else:
2248
+ converted, inferred_tz = objects_to_datetime64(
2249
+ data,
2250
+ dayfirst=dayfirst,
2251
+ yearfirst=yearfirst,
2252
+ allow_object=False,
2253
+ out_unit=out_unit or "ns",
2254
+ )
2255
+ copy = False
2256
+ if tz and inferred_tz:
2257
+ # two timezones: convert to intended from base UTC repr
2258
+ # GH#42505 by convention, these are _already_ UTC
2259
+ result = converted
2260
+
2261
+ elif inferred_tz:
2262
+ tz = inferred_tz
2263
+ result = converted
2264
+
2265
+ else:
2266
+ result, _ = _construct_from_dt64_naive(
2267
+ converted, tz=tz, copy=copy, ambiguous=ambiguous
2268
+ )
2269
+ return result, tz
2270
+
2271
+ data_dtype = data.dtype
2272
+
2273
+ # `data` may have originally been a Categorical[datetime64[ns, tz]],
2274
+ # so we need to handle these types.
2275
+ if isinstance(data_dtype, DatetimeTZDtype):
2276
+ # DatetimeArray -> ndarray
2277
+ data = cast(DatetimeArray, data)
2278
+ tz = _maybe_infer_tz(tz, data.tz)
2279
+ result = data._ndarray
2280
+
2281
+ elif lib.is_np_dtype(data_dtype, "M"):
2282
+ # tz-naive DatetimeArray or ndarray[datetime64]
2283
+ if isinstance(data, DatetimeArray):
2284
+ data = data._ndarray
2285
+
2286
+ data = cast(np.ndarray, data)
2287
+ result, copy = _construct_from_dt64_naive(
2288
+ data, tz=tz, copy=copy, ambiguous=ambiguous
2289
+ )
2290
+
2291
+ else:
2292
+ # must be integer dtype otherwise
2293
+ # assume this data are epoch timestamps
2294
+ if data.dtype != INT64_DTYPE:
2295
+ data = data.astype(np.int64, copy=False)
2296
+ copy = False
2297
+ data = cast(np.ndarray, data)
2298
+ result = data.view(out_dtype)
2299
+
2300
+ if copy:
2301
+ result = result.copy()
2302
+
2303
+ assert isinstance(result, np.ndarray), type(result)
2304
+ assert result.dtype.kind == "M"
2305
+ assert result.dtype != "M8"
2306
+ assert is_supported_dtype(result.dtype)
2307
+ return result, tz
2308
+
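# Illustrative sketch of the kinds of input _sequence_to_dt64 normalizes, exercised
# through the public constructor (assumed to route through this helper): object
# strings, integer epoch nanoseconds, and existing datetime64 data.
import numpy as np
import pandas as pd

pd.DatetimeIndex(np.array(["2020-01-01", "2020-01-02"], dtype=object))
pd.DatetimeIndex(np.array([0, 86_400_000_000_000], dtype=np.int64))  # epoch ns
pd.DatetimeIndex(np.array(["2020-01-01"], dtype="datetime64[s]"))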
2309
+
2310
+ def _construct_from_dt64_naive(
2311
+ data: np.ndarray, *, tz: tzinfo | None, copy: bool, ambiguous: TimeAmbiguous
2312
+ ) -> tuple[np.ndarray, bool]:
2313
+ """
2314
+ Convert datetime64 data to a supported dtype, localizing if necessary.
2315
+ """
2316
+ # Caller is responsible for ensuring
2317
+ # lib.is_np_dtype(data.dtype)
2318
+
2319
+ new_dtype = data.dtype
2320
+ if not is_supported_dtype(new_dtype):
2321
+ # Cast to the nearest supported unit, generally "s"
2322
+ new_dtype = get_supported_dtype(new_dtype)
2323
+ data = astype_overflowsafe(data, dtype=new_dtype, copy=False)
2324
+ copy = False
2325
+
2326
+ if data.dtype.byteorder == ">":
2327
+ # TODO: better way to handle this? non-copying alternative?
2328
+ # without this, test_constructor_datetime64_bigendian fails
2329
+ data = data.astype(data.dtype.newbyteorder("<"))
2330
+ new_dtype = data.dtype
2331
+ copy = False
2332
+
2333
+ if tz is not None:
2334
+ # Convert tz-naive to UTC
2335
+ # TODO: if tz is UTC, are there situations where we *don't* want a
2336
+ # copy? tz_localize_to_utc always makes one.
2337
+ shape = data.shape
2338
+ if data.ndim > 1:
2339
+ data = data.ravel()
2340
+
2341
+ data_unit = get_unit_from_dtype(new_dtype)
2342
+ data = tzconversion.tz_localize_to_utc(
2343
+ data.view("i8"), tz, ambiguous=ambiguous, creso=data_unit
2344
+ )
2345
+ data = data.view(new_dtype)
2346
+ data = data.reshape(shape)
2347
+
2348
+ assert data.dtype == new_dtype, data.dtype
2349
+ result = data
2350
+
2351
+ return result, copy
2352
+
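# Illustrative sketch of the localization step above, via the public API: tz-naive
# wall times are converted to UTC-based values under the requested timezone.
import pandas as pd

naive = pd.DatetimeIndex(["2020-06-01 12:00", "2020-06-02 12:00"])
aware = naive.tz_localize("Europe/Berlin", ambiguous="raise")
print(aware.tz)  # Europe/Berlin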
2353
+
2354
+ def objects_to_datetime64(
2355
+ data: np.ndarray,
2356
+ dayfirst,
2357
+ yearfirst,
2358
+ utc: bool = False,
2359
+ errors: DateTimeErrorChoices = "raise",
2360
+ allow_object: bool = False,
2361
+ out_unit: str = "ns",
2362
+ ):
2363
+ """
2364
+ Convert data to array of timestamps.
2365
+
2366
+ Parameters
2367
+ ----------
2368
+ data : np.ndarray[object]
2369
+ dayfirst : bool
2370
+ yearfirst : bool
2371
+ utc : bool, default False
2372
+ Whether to convert/localize timestamps to UTC.
2373
+ errors : {'raise', 'ignore', 'coerce'}
2374
+ allow_object : bool
2375
+ Whether to return an object-dtype ndarray instead of raising if the
2376
+ data contains more than one timezone.
2377
+ out_unit : str, default "ns"
2378
+
2379
+ Returns
2380
+ -------
2381
+ result : ndarray
2382
+ np.datetime64[out_unit] if returned values represent wall times or UTC
2383
+ timestamps.
2384
+ object if mixed timezones
2385
+ inferred_tz : tzinfo or None
2386
+ If not None, then the datetime64 values in `result` denote UTC timestamps.
2387
+
2388
+ Raises
2389
+ ------
2390
+ ValueError : if data cannot be converted to datetimes
2391
+ TypeError : When a type cannot be converted to datetime
2392
+ """
2393
+ assert errors in ["raise", "ignore", "coerce"]
2394
+
2395
+ # if str-dtype, convert
2396
+ data = np.asarray(data, dtype=np.object_)
2397
+
2398
+ result, tz_parsed = tslib.array_to_datetime(
2399
+ data,
2400
+ errors=errors,
2401
+ utc=utc,
2402
+ dayfirst=dayfirst,
2403
+ yearfirst=yearfirst,
2404
+ creso=abbrev_to_npy_unit(out_unit),
2405
+ )
2406
+
2407
+ if tz_parsed is not None:
2408
+ # We can take a shortcut since the datetime64 numpy array
2409
+ # is in UTC
2410
+ return result, tz_parsed
2411
+ elif result.dtype.kind == "M":
2412
+ return result, tz_parsed
2413
+ elif result.dtype == object:
2414
+ # GH#23675 when called via `pd.to_datetime`, returning an object-dtype
2415
+ # array is allowed. When called via `pd.DatetimeIndex`, we can
2416
+ # only accept datetime64 dtype, so raise TypeError if object-dtype
2417
+ # is returned, as that indicates the values can be recognized as
2418
+ # datetimes but they have conflicting timezones/awareness
2419
+ if allow_object:
2420
+ return result, tz_parsed
2421
+ raise TypeError("DatetimeIndex has mixed timezones")
2422
+ else: # pragma: no cover
2423
+ # GH#23675 this TypeError should never be hit, whereas the TypeError
2424
+ # in the object-dtype branch above is reachable.
2425
+ raise TypeError(result)
2426
+
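# Illustrative sketch of the object-parsing path as seen from pd.to_datetime: strings
# with a consistent UTC offset yield a tz-aware result, and utc=True forces
# conversion to UTC instead of failing on mixed awareness.
import pandas as pd

pd.to_datetime(["2020-01-01 00:00+00:00", "2020-01-02 00:00+00:00"])
pd.to_datetime(["2020-01-01 00:00+01:00", "2020-01-02 00:00"], utc=True)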
2427
+
2428
+ def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None):
2429
+ """
2430
+ Convert data based on dtype conventions, issuing
2431
+ errors where appropriate.
2432
+
2433
+ Parameters
2434
+ ----------
2435
+ data : np.ndarray or pd.Index
2436
+ copy : bool
2437
+ tz : tzinfo or None, default None
2438
+
2439
+ Returns
2440
+ -------
2441
+ data : np.ndarray or pd.Index
2442
+ copy : bool
2443
+
2444
+ Raises
2445
+ ------
2446
+ TypeError : PeriodDtype data is passed
2447
+ """
2448
+ if not hasattr(data, "dtype"):
2449
+ # e.g. collections.deque
2450
+ return data, copy
2451
+
2452
+ if is_float_dtype(data.dtype):
2453
+ # pre-2.0 we treated these as wall-times, inconsistent with ints
2454
+ # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes.
2455
+ # Note: data.astype(np.int64) fails ARM tests, see
2456
+ # https://github.com/pandas-dev/pandas/issues/49468.
2457
+ data = data.astype(DT64NS_DTYPE).view("i8")
2458
+ copy = False
2459
+
2460
+ elif lib.is_np_dtype(data.dtype, "m") or is_bool_dtype(data.dtype):
2461
+ # GH#29794 enforcing deprecation introduced in GH#23539
2462
+ raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]")
2463
+ elif isinstance(data.dtype, PeriodDtype):
2464
+ # Note: without explicitly raising here, PeriodIndex
2465
+ # test_setops.test_join_does_not_recur fails
2466
+ raise TypeError(
2467
+ "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead"
2468
+ )
2469
+
2470
+ elif isinstance(data.dtype, ExtensionDtype) and not isinstance(
2471
+ data.dtype, DatetimeTZDtype
2472
+ ):
2473
+ # TODO: We have no tests for these
2474
+ data = np.array(data, dtype=np.object_)
2475
+ copy = False
2476
+
2477
+ return data, copy
2478
+
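# Illustrative sketch: the dtype rules above surface in the public constructor, e.g.
# timedelta64 input is rejected rather than silently reinterpreted as datetimes.
import numpy as np
import pandas as pd

try:
    pd.DatetimeIndex(np.array([1, 2], dtype="timedelta64[ns]"))
except TypeError as err:
    print(err)  # dtype timedelta64[ns] cannot be converted to datetime64[ns]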
2479
+
2480
+ # -------------------------------------------------------------------
2481
+ # Validation and Inference
2482
+
2483
+
2484
+ def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None:
2485
+ """
2486
+ If a timezone is inferred from data, check that it is compatible with
2487
+ the user-provided timezone, if any.
2488
+
2489
+ Parameters
2490
+ ----------
2491
+ tz : tzinfo or None
2492
+ inferred_tz : tzinfo or None
2493
+
2494
+ Returns
2495
+ -------
2496
+ tz : tzinfo or None
2497
+
2498
+ Raises
2499
+ ------
2500
+ TypeError : if both timezones are present but do not match
2501
+ """
2502
+ if tz is None:
2503
+ tz = inferred_tz
2504
+ elif inferred_tz is None:
2505
+ pass
2506
+ elif not timezones.tz_compare(tz, inferred_tz):
2507
+ raise TypeError(
2508
+ f"data is already tz-aware {inferred_tz}, unable to "
2509
+ f"set specified tz: {tz}"
2510
+ )
2511
+ return tz
2512
+
2513
+
2514
+ def _validate_dt64_dtype(dtype):
2515
+ """
2516
+ Check that a dtype, if passed, represents either a numpy datetime64[ns]
2517
+ dtype or a pandas DatetimeTZDtype.
2518
+
2519
+ Parameters
2520
+ ----------
2521
+ dtype : object
2522
+
2523
+ Returns
2524
+ -------
2525
+ dtype : None, numpy.dtype, or DatetimeTZDtype
2526
+
2527
+ Raises
2528
+ ------
2529
+ ValueError : invalid dtype
2530
+
2531
+ Notes
2532
+ -----
2533
+ Unlike _validate_tz_from_dtype, this does _not_ allow non-existent
2534
+ tz errors to go through
2535
+ """
2536
+ if dtype is not None:
2537
+ dtype = pandas_dtype(dtype)
2538
+ if dtype == np.dtype("M8"):
2539
+ # no precision, disallowed GH#24806
2540
+ msg = (
2541
+ "Passing in 'datetime64' dtype with no precision is not allowed. "
2542
+ "Please pass in 'datetime64[ns]' instead."
2543
+ )
2544
+ raise ValueError(msg)
2545
+
2546
+ if (
2547
+ isinstance(dtype, np.dtype)
2548
+ and (dtype.kind != "M" or not is_supported_dtype(dtype))
2549
+ ) or not isinstance(dtype, (np.dtype, DatetimeTZDtype)):
2550
+ raise ValueError(
2551
+ f"Unexpected value for 'dtype': '{dtype}'. "
2552
+ "Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', "
2553
+ "'datetime64[ns]' or DatetimeTZDtype'."
2554
+ )
2555
+
2556
+ if getattr(dtype, "tz", None):
2557
+ # https://github.com/pandas-dev/pandas/issues/18595
2558
+ # Ensure that we have a standard timezone for pytz objects.
2559
+ # Without this, things like adding an array of timedeltas and
2560
+ # a tz-aware Timestamp (with a tz specific to its datetime) will
2561
+ # be incorrect(ish?) for the array as a whole
2562
+ dtype = cast(DatetimeTZDtype, dtype)
2563
+ dtype = DatetimeTZDtype(
2564
+ unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz)
2565
+ )
2566
+
2567
+ return dtype
2568
+
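# Illustrative sketch of the precision check above: a bare 'datetime64' dtype with
# no unit is rejected, while unit-qualified dtypes such as 'datetime64[ns]' pass.
import pandas as pd

pd.DatetimeIndex(["2020-01-01"], dtype="datetime64[ns]")  # accepted
try:
    pd.DatetimeIndex(["2020-01-01"], dtype="datetime64")  # no precision
except ValueError as err:
    print(err)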
2569
+
2570
+ def _validate_tz_from_dtype(
2571
+ dtype, tz: tzinfo | None, explicit_tz_none: bool = False
2572
+ ) -> tzinfo | None:
2573
+ """
2574
+ If the given dtype is a DatetimeTZDtype, extract the implied
2575
+ tzinfo object from it and check that it does not conflict with the given
2576
+ tz.
2577
+
2578
+ Parameters
2579
+ ----------
2580
+ dtype : dtype, str
2581
+ tz : None, tzinfo
2582
+ explicit_tz_none : bool, default False
2583
+ Whether tz=None was passed explicitly, as opposed to lib.no_default.
2584
+
2585
+ Returns
2586
+ -------
2587
+ tz : consensus tzinfo
2588
+
2589
+ Raises
2590
+ ------
2591
+ ValueError : on tzinfo mismatch
2592
+ """
2593
+ if dtype is not None:
2594
+ if isinstance(dtype, str):
2595
+ try:
2596
+ dtype = DatetimeTZDtype.construct_from_string(dtype)
2597
+ except TypeError:
2598
+ # Things like `datetime64[ns]`, which is OK for the
2599
+ # constructors, but also nonsense, which should be validated
2600
+ # but not by us. We *do* allow non-existent tz errors to
2601
+ # go through
2602
+ pass
2603
+ dtz = getattr(dtype, "tz", None)
2604
+ if dtz is not None:
2605
+ if tz is not None and not timezones.tz_compare(tz, dtz):
2606
+ raise ValueError("cannot supply both a tz and a dtype with a tz")
2607
+ if explicit_tz_none:
2608
+ raise ValueError("Cannot pass both a timezone-aware dtype and tz=None")
2609
+ tz = dtz
2610
+
2611
+ if tz is not None and lib.is_np_dtype(dtype, "M"):
2612
+ # We also need to check for the case where the user passed a
2613
+ # tz-naive dtype (i.e. datetime64[ns])
2614
+ if tz is not None and not timezones.tz_compare(tz, dtz):
2615
+ raise ValueError(
2616
+ "cannot supply both a tz and a "
2617
+ "timezone-naive dtype (i.e. datetime64[ns])"
2618
+ )
2619
+
2620
+ return tz
2621
+
2622
+
2623
+ def _infer_tz_from_endpoints(
2624
+ start: Timestamp, end: Timestamp, tz: tzinfo | None
2625
+ ) -> tzinfo | None:
2626
+ """
2627
+ If a timezone is not explicitly given via `tz`, see if one can
2628
+ be inferred from the `start` and `end` endpoints. If more than one
2629
+ of these inputs provides a timezone, require that they all agree.
2630
+
2631
+ Parameters
2632
+ ----------
2633
+ start : Timestamp
2634
+ end : Timestamp
2635
+ tz : tzinfo or None
2636
+
2637
+ Returns
2638
+ -------
2639
+ tz : tzinfo or None
2640
+
2641
+ Raises
2642
+ ------
2643
+ TypeError : if start and end timezones do not agree
2644
+ """
2645
+ try:
2646
+ inferred_tz = timezones.infer_tzinfo(start, end)
2647
+ except AssertionError as err:
2648
+ # infer_tzinfo raises AssertionError if passed mismatched timezones
2649
+ raise TypeError(
2650
+ "Start and end cannot both be tz-aware with different timezones"
2651
+ ) from err
2652
+
2653
+ inferred_tz = timezones.maybe_get_tz(inferred_tz)
2654
+ tz = timezones.maybe_get_tz(tz)
2655
+
2656
+ if tz is not None and inferred_tz is not None:
2657
+ if not timezones.tz_compare(inferred_tz, tz):
2658
+ raise AssertionError("Inferred time zone not equal to passed time zone")
2659
+
2660
+ elif inferred_tz is not None:
2661
+ tz = inferred_tz
2662
+
2663
+ return tz
2664
+
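# Illustrative sketch of endpoint inference as seen from date_range: a tz-aware
# start with no explicit tz makes the whole range tz-aware.
import pandas as pd

rng = pd.date_range(start=pd.Timestamp("2020-01-01", tz="UTC"), periods=3)
print(rng.tz)  # UTC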
2665
+
2666
+ def _maybe_normalize_endpoints(
2667
+ start: Timestamp | None, end: Timestamp | None, normalize: bool
2668
+ ):
2669
+ if normalize:
2670
+ if start is not None:
2671
+ start = start.normalize()
2672
+
2673
+ if end is not None:
2674
+ end = end.normalize()
2675
+
2676
+ return start, end
2677
+
2678
+
2679
+ def _maybe_localize_point(
2680
+ ts: Timestamp | None, freq, tz, ambiguous, nonexistent
2681
+ ) -> Timestamp | None:
2682
+ """
2683
+ Localize a start or end Timestamp to the timezone of the corresponding
2684
+ start or end Timestamp
2685
+
2686
+ Parameters
2687
+ ----------
2688
+ ts : start or end Timestamp to potentially localize
2689
+ freq : Tick, DateOffset, or None
2690
+ tz : str, timezone object or None
2691
+ ambiguous : str, localization behavior for ambiguous times
2692
+ nonexistent : str, localization behavior for nonexistent times
2693
+
2694
+ Returns
2695
+ -------
2696
+ ts : Timestamp
2697
+ """
2698
+ # Make sure start and end are timezone localized if:
2699
+ # 1) freq = a Timedelta-like frequency (Tick)
2700
+ # 2) freq = None i.e. generating a linspaced range
2701
+ if ts is not None and ts.tzinfo is None:
2702
+ # Note: We can't ambiguous='infer' a singular ambiguous time; however,
2703
+ # we have historically defaulted ambiguous=False
2704
+ ambiguous = ambiguous if ambiguous != "infer" else False
2705
+ localize_args = {"ambiguous": ambiguous, "nonexistent": nonexistent, "tz": None}
2706
+ if isinstance(freq, Tick) or freq is None:
2707
+ localize_args["tz"] = tz
2708
+ ts = ts.tz_localize(**localize_args)
2709
+ return ts
2710
+
2711
+
2712
+ def _generate_range(
2713
+ start: Timestamp | None,
2714
+ end: Timestamp | None,
2715
+ periods: int | None,
2716
+ offset: BaseOffset,
2717
+ *,
2718
+ unit: str,
2719
+ ):
2720
+ """
2721
+ Generates a sequence of dates corresponding to the specified time
2722
+ offset. Similar to dateutil.rrule except uses pandas DateOffset
2723
+ objects to represent time increments.
2724
+
2725
+ Parameters
2726
+ ----------
2727
+ start : Timestamp or None
2728
+ end : Timestamp or None
2729
+ periods : int or None
2730
+ offset : DateOffset
2731
+ unit : str
2732
+
2733
+ Notes
2734
+ -----
2735
+ * This method is faster for generating weekdays than dateutil.rrule
2736
+ * At least two of (start, end, periods) must be specified.
2737
+ * If both start and end are specified, the returned dates will
2738
+ satisfy start <= date <= end.
2739
+
2740
+ Returns
2741
+ -------
2742
+ dates : generator object
2743
+ """
2744
+ offset = to_offset(offset)
2745
+
2746
+ # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
2747
+ # expected "Union[integer[Any], float, str, date, datetime64]"
2748
+ start = Timestamp(start) # type: ignore[arg-type]
2749
+ if start is not NaT:
2750
+ start = start.as_unit(unit)
2751
+ else:
2752
+ start = None
2753
+
2754
+ # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
2755
+ # expected "Union[integer[Any], float, str, date, datetime64]"
2756
+ end = Timestamp(end) # type: ignore[arg-type]
2757
+ if end is not NaT:
2758
+ end = end.as_unit(unit)
2759
+ else:
2760
+ end = None
2761
+
2762
+ if start and not offset.is_on_offset(start):
2763
+ # Incompatible types in assignment (expression has type "datetime",
2764
+ # variable has type "Optional[Timestamp]")
2765
+ start = offset.rollforward(start) # type: ignore[assignment]
2766
+
2767
+ elif end and not offset.is_on_offset(end):
2768
+ # Incompatible types in assignment (expression has type "datetime",
2769
+ # variable has type "Optional[Timestamp]")
2770
+ end = offset.rollback(end) # type: ignore[assignment]
2771
+
2772
+ # Unsupported operand types for < ("Timestamp" and "None")
2773
+ if periods is None and end < start and offset.n >= 0: # type: ignore[operator]
2774
+ end = None
2775
+ periods = 0
2776
+
2777
+ if end is None:
2778
+ # error: No overload variant of "__radd__" of "BaseOffset" matches
2779
+ # argument type "None"
2780
+ end = start + (periods - 1) * offset # type: ignore[operator]
2781
+
2782
+ if start is None:
2783
+ # error: No overload variant of "__radd__" of "BaseOffset" matches
2784
+ # argument type "None"
2785
+ start = end - (periods - 1) * offset # type: ignore[operator]
2786
+
2787
+ start = cast(Timestamp, start)
2788
+ end = cast(Timestamp, end)
2789
+
2790
+ cur = start
2791
+ if offset.n >= 0:
2792
+ while cur <= end:
2793
+ yield cur
2794
+
2795
+ if cur == end:
2796
+ # GH#24252 avoid overflows by not performing the addition
2797
+ # in offset.apply unless we have to
2798
+ break
2799
+
2800
+ # faster than cur + offset
2801
+ next_date = offset._apply(cur)
2802
+ next_date = next_date.as_unit(unit)
2803
+ if next_date <= cur:
2804
+ raise ValueError(f"Offset {offset} did not increment date")
2805
+ cur = next_date
2806
+ else:
2807
+ while cur >= end:
2808
+ yield cur
2809
+
2810
+ if cur == end:
2811
+ # GH#24252 avoid overflows by not performing the addition
2812
+ # in offset.apply unless we have to
2813
+ break
2814
+
2815
+ # faster than cur + offset
2816
+ next_date = offset._apply(cur)
2817
+ next_date = next_date.as_unit(unit)
2818
+ if next_date >= cur:
2819
+ raise ValueError(f"Offset {offset} did not decrement date")
2820
+ cur = next_date
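# Illustrative sketch of what the generator above backs: ranges with non-fixed
# frequencies such as business days, where each step applies the DateOffset to the
# previous timestamp instead of adding a constant timedelta.
import pandas as pd

print(list(pd.date_range("2024-01-05", periods=3, freq="B")))
# Fri Jan 5, Mon Jan 8, Tue Jan 9 -- the weekend is skipped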
venv/lib/python3.10/site-packages/pandas/core/arrays/floating.py ADDED
@@ -0,0 +1,173 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import ClassVar
4
+
5
+ import numpy as np
6
+
7
+ from pandas.core.dtypes.base import register_extension_dtype
8
+ from pandas.core.dtypes.common import is_float_dtype
9
+
10
+ from pandas.core.arrays.numeric import (
11
+ NumericArray,
12
+ NumericDtype,
13
+ )
14
+
15
+
16
+ class FloatingDtype(NumericDtype):
17
+ """
18
+ An ExtensionDtype to hold a single size of floating dtype.
19
+
20
+ These specific implementations are subclasses of the non-public
21
+ FloatingDtype. For example we have Float32Dtype to represent float32.
22
+
23
+ The attributes name & type are set when these subclasses are created.
24
+ """
25
+
26
+ _default_np_dtype = np.dtype(np.float64)
27
+ _checker = is_float_dtype
28
+
29
+ @classmethod
30
+ def construct_array_type(cls) -> type[FloatingArray]:
31
+ """
32
+ Return the array type associated with this dtype.
33
+
34
+ Returns
35
+ -------
36
+ type
37
+ """
38
+ return FloatingArray
39
+
40
+ @classmethod
41
+ def _get_dtype_mapping(cls) -> dict[np.dtype, FloatingDtype]:
42
+ return NUMPY_FLOAT_TO_DTYPE
43
+
44
+ @classmethod
45
+ def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
46
+ """
47
+ Safely cast the values to the given dtype.
48
+
49
+ "safe" in this context means the casting is lossless.
50
+ """
51
+ # This is really only here for compatibility with IntegerDtype
53
+ return values.astype(dtype, copy=copy)
54
+
55
+
56
+ class FloatingArray(NumericArray):
57
+ """
58
+ Array of floating (optional missing) values.
59
+
60
+ .. warning::
61
+
62
+ FloatingArray is currently experimental, and its API or internal
63
+ implementation may change without warning. Especially the behaviour
64
+ regarding NaN (distinct from NA missing values) is subject to change.
65
+
66
+ We represent a FloatingArray with 2 numpy arrays:
67
+
68
+ - data: contains a numpy float array of the appropriate dtype
69
+ - mask: a boolean array holding a mask on the data, True is missing
70
+
71
+ To construct a FloatingArray from generic array-like input, use
72
+ :func:`pandas.array` with one of the float dtypes (see examples).
73
+
74
+ See :ref:`integer_na` for more.
75
+
76
+ Parameters
77
+ ----------
78
+ values : numpy.ndarray
79
+ A 1-d float-dtype array.
80
+ mask : numpy.ndarray
81
+ A 1-d boolean-dtype array indicating missing values.
82
+ copy : bool, default False
83
+ Whether to copy the `values` and `mask`.
84
+
85
+ Attributes
86
+ ----------
87
+ None
88
+
89
+ Methods
90
+ -------
91
+ None
92
+
93
+ Returns
94
+ -------
95
+ FloatingArray
96
+
97
+ Examples
98
+ --------
99
+ Create a FloatingArray with :func:`pandas.array`:
100
+
101
+ >>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype())
102
+ <FloatingArray>
103
+ [0.1, <NA>, 0.3]
104
+ Length: 3, dtype: Float32
105
+
106
+ String aliases for the dtypes are also available. They are capitalized.
107
+
108
+ >>> pd.array([0.1, None, 0.3], dtype="Float32")
109
+ <FloatingArray>
110
+ [0.1, <NA>, 0.3]
111
+ Length: 3, dtype: Float32
112
+ """
113
+
114
+ _dtype_cls = FloatingDtype
115
+
116
+ # The value used to fill '_data' to avoid upcasting
117
+ _internal_fill_value = np.nan
118
+ # Fill values used for any/all
119
+ # Incompatible types in assignment (expression has type "float", base class
120
+ # "BaseMaskedArray" defined the type as "<typing special form>")
121
+ _truthy_value = 1.0 # type: ignore[assignment]
122
+ _falsey_value = 0.0 # type: ignore[assignment]
123
+
124
+
125
+ _dtype_docstring = """
126
+ An ExtensionDtype for {dtype} data.
127
+
128
+ This dtype uses ``pd.NA`` as missing value indicator.
129
+
130
+ Attributes
131
+ ----------
132
+ None
133
+
134
+ Methods
135
+ -------
136
+ None
137
+
138
+ Examples
139
+ --------
140
+ For Float32Dtype:
141
+
142
+ >>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float32Dtype())
143
+ >>> ser.dtype
144
+ Float32Dtype()
145
+
146
+ For Float64Dtype:
147
+
148
+ >>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float64Dtype())
149
+ >>> ser.dtype
150
+ Float64Dtype()
151
+ """
152
+
153
+ # create the Dtype
154
+
155
+
156
+ @register_extension_dtype
157
+ class Float32Dtype(FloatingDtype):
158
+ type = np.float32
159
+ name: ClassVar[str] = "Float32"
160
+ __doc__ = _dtype_docstring.format(dtype="float32")
161
+
162
+
163
+ @register_extension_dtype
164
+ class Float64Dtype(FloatingDtype):
165
+ type = np.float64
166
+ name: ClassVar[str] = "Float64"
167
+ __doc__ = _dtype_docstring.format(dtype="float64")
168
+
169
+
170
+ NUMPY_FLOAT_TO_DTYPE: dict[np.dtype, FloatingDtype] = {
171
+ np.dtype(np.float32): Float32Dtype(),
172
+ np.dtype(np.float64): Float64Dtype(),
173
+ }
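# Illustrative usage sketch for the masked float arrays defined above: pd.NA-aware
# float data constructed with pd.array and one of the registered dtype aliases.
import pandas as pd

arr = pd.array([0.1, None, 0.3], dtype="Float64")
print(arr.dtype)   # Float64
print(arr.isna())  # [False  True False]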
venv/lib/python3.10/site-packages/pandas/core/arrays/interval.py ADDED
@@ -0,0 +1,1917 @@
1
+ from __future__ import annotations
2
+
3
+ import operator
4
+ from operator import (
5
+ le,
6
+ lt,
7
+ )
8
+ import textwrap
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Literal,
12
+ Union,
13
+ overload,
14
+ )
15
+ import warnings
16
+
17
+ import numpy as np
18
+
19
+ from pandas._libs import lib
20
+ from pandas._libs.interval import (
21
+ VALID_CLOSED,
22
+ Interval,
23
+ IntervalMixin,
24
+ intervals_to_interval_bounds,
25
+ )
26
+ from pandas._libs.missing import NA
27
+ from pandas._typing import (
28
+ ArrayLike,
29
+ AxisInt,
30
+ Dtype,
31
+ FillnaOptions,
32
+ IntervalClosedType,
33
+ NpDtype,
34
+ PositionalIndexer,
35
+ ScalarIndexer,
36
+ Self,
37
+ SequenceIndexer,
38
+ SortKind,
39
+ TimeArrayLike,
40
+ npt,
41
+ )
42
+ from pandas.compat.numpy import function as nv
43
+ from pandas.errors import IntCastingNaNError
44
+ from pandas.util._decorators import Appender
45
+
46
+ from pandas.core.dtypes.cast import (
47
+ LossySetitemError,
48
+ maybe_upcast_numeric_to_64bit,
49
+ )
50
+ from pandas.core.dtypes.common import (
51
+ is_float_dtype,
52
+ is_integer_dtype,
53
+ is_list_like,
54
+ is_object_dtype,
55
+ is_scalar,
56
+ is_string_dtype,
57
+ needs_i8_conversion,
58
+ pandas_dtype,
59
+ )
60
+ from pandas.core.dtypes.dtypes import (
61
+ CategoricalDtype,
62
+ IntervalDtype,
63
+ )
64
+ from pandas.core.dtypes.generic import (
65
+ ABCDataFrame,
66
+ ABCDatetimeIndex,
67
+ ABCIntervalIndex,
68
+ ABCPeriodIndex,
69
+ )
70
+ from pandas.core.dtypes.missing import (
71
+ is_valid_na_for_dtype,
72
+ isna,
73
+ notna,
74
+ )
75
+
76
+ from pandas.core.algorithms import (
77
+ isin,
78
+ take,
79
+ unique,
80
+ value_counts_internal as value_counts,
81
+ )
82
+ from pandas.core.arrays import ArrowExtensionArray
83
+ from pandas.core.arrays.base import (
84
+ ExtensionArray,
85
+ _extension_array_shared_docs,
86
+ )
87
+ from pandas.core.arrays.datetimes import DatetimeArray
88
+ from pandas.core.arrays.timedeltas import TimedeltaArray
89
+ import pandas.core.common as com
90
+ from pandas.core.construction import (
91
+ array as pd_array,
92
+ ensure_wrapped_if_datetimelike,
93
+ extract_array,
94
+ )
95
+ from pandas.core.indexers import check_array_indexer
96
+ from pandas.core.ops import (
97
+ invalid_comparison,
98
+ unpack_zerodim_and_defer,
99
+ )
100
+
101
+ if TYPE_CHECKING:
102
+ from collections.abc import (
103
+ Iterator,
104
+ Sequence,
105
+ )
106
+
107
+ from pandas import (
108
+ Index,
109
+ Series,
110
+ )
111
+
112
+
113
+ IntervalSide = Union[TimeArrayLike, np.ndarray]
114
+ IntervalOrNA = Union[Interval, float]
115
+
116
+ _interval_shared_docs: dict[str, str] = {}
117
+
118
+ _shared_docs_kwargs = {
119
+ "klass": "IntervalArray",
120
+ "qualname": "arrays.IntervalArray",
121
+ "name": "",
122
+ }
123
+
124
+
125
+ _interval_shared_docs[
126
+ "class"
127
+ ] = """
128
+ %(summary)s
129
+
130
+ Parameters
131
+ ----------
132
+ data : array-like (1-dimensional)
133
+ Array-like (ndarray, :class:`DatetimeArray`, :class:`TimedeltaArray`) containing
134
+ Interval objects from which to build the %(klass)s.
135
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
136
+ Whether the intervals are closed on the left-side, right-side, both or
137
+ neither.
138
+ dtype : dtype or None, default None
139
+ If None, dtype will be inferred.
140
+ copy : bool, default False
141
+ Copy the input data.
142
+ %(name)s\
143
+ verify_integrity : bool, default True
144
+ Verify that the %(klass)s is valid.
145
+
146
+ Attributes
147
+ ----------
148
+ left
149
+ right
150
+ closed
151
+ mid
152
+ length
153
+ is_empty
154
+ is_non_overlapping_monotonic
155
+ %(extra_attributes)s\
156
+
157
+ Methods
158
+ -------
159
+ from_arrays
160
+ from_tuples
161
+ from_breaks
162
+ contains
163
+ overlaps
164
+ set_closed
165
+ to_tuples
166
+ %(extra_methods)s\
167
+
168
+ See Also
169
+ --------
170
+ Index : The base pandas Index type.
171
+ Interval : A bounded slice-like interval; the elements of an %(klass)s.
172
+ interval_range : Function to create a fixed frequency IntervalIndex.
173
+ cut : Bin values into discrete Intervals.
174
+ qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
175
+
176
+ Notes
177
+ -----
178
+ See the `user guide
179
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`__
180
+ for more.
181
+
182
+ %(examples)s\
183
+ """
184
+
185
+
186
+ @Appender(
187
+ _interval_shared_docs["class"]
188
+ % {
189
+ "klass": "IntervalArray",
190
+ "summary": "Pandas array for interval data that are closed on the same side.",
191
+ "name": "",
192
+ "extra_attributes": "",
193
+ "extra_methods": "",
194
+ "examples": textwrap.dedent(
195
+ """\
196
+ Examples
197
+ --------
198
+ A new ``IntervalArray`` can be constructed directly from an array-like of
199
+ ``Interval`` objects:
200
+
201
+ >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
202
+ <IntervalArray>
203
+ [(0, 1], (1, 5]]
204
+ Length: 2, dtype: interval[int64, right]
205
+
206
+ It may also be constructed using one of the constructor
207
+ methods: :meth:`IntervalArray.from_arrays`,
208
+ :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
209
+ """
210
+ ),
211
+ }
212
+ )
213
+ class IntervalArray(IntervalMixin, ExtensionArray):
214
+ can_hold_na = True
215
+ _na_value = _fill_value = np.nan
216
+
217
+ @property
218
+ def ndim(self) -> Literal[1]:
219
+ return 1
220
+
221
+ # To make mypy recognize the fields
222
+ _left: IntervalSide
223
+ _right: IntervalSide
224
+ _dtype: IntervalDtype
225
+
226
+ # ---------------------------------------------------------------------
227
+ # Constructors
228
+
229
+ def __new__(
230
+ cls,
231
+ data,
232
+ closed: IntervalClosedType | None = None,
233
+ dtype: Dtype | None = None,
234
+ copy: bool = False,
235
+ verify_integrity: bool = True,
236
+ ):
237
+ data = extract_array(data, extract_numpy=True)
238
+
239
+ if isinstance(data, cls):
240
+ left: IntervalSide = data._left
241
+ right: IntervalSide = data._right
242
+ closed = closed or data.closed
243
+ dtype = IntervalDtype(left.dtype, closed=closed)
244
+ else:
245
+ # don't allow scalars
246
+ if is_scalar(data):
247
+ msg = (
248
+ f"{cls.__name__}(...) must be called with a collection "
249
+ f"of some kind, {data} was passed"
250
+ )
251
+ raise TypeError(msg)
252
+
253
+ # might need to convert empty or purely na data
254
+ data = _maybe_convert_platform_interval(data)
255
+ left, right, infer_closed = intervals_to_interval_bounds(
256
+ data, validate_closed=closed is None
257
+ )
258
+ if left.dtype == object:
259
+ left = lib.maybe_convert_objects(left)
260
+ right = lib.maybe_convert_objects(right)
261
+ closed = closed or infer_closed
262
+
263
+ left, right, dtype = cls._ensure_simple_new_inputs(
264
+ left,
265
+ right,
266
+ closed=closed,
267
+ copy=copy,
268
+ dtype=dtype,
269
+ )
270
+
271
+ if verify_integrity:
272
+ cls._validate(left, right, dtype=dtype)
273
+
274
+ return cls._simple_new(
275
+ left,
276
+ right,
277
+ dtype=dtype,
278
+ )
279
+
280
+ @classmethod
281
+ def _simple_new(
282
+ cls,
283
+ left: IntervalSide,
284
+ right: IntervalSide,
285
+ dtype: IntervalDtype,
286
+ ) -> Self:
287
+ result = IntervalMixin.__new__(cls)
288
+ result._left = left
289
+ result._right = right
290
+ result._dtype = dtype
291
+
292
+ return result
293
+
294
+ @classmethod
295
+ def _ensure_simple_new_inputs(
296
+ cls,
297
+ left,
298
+ right,
299
+ closed: IntervalClosedType | None = None,
300
+ copy: bool = False,
301
+ dtype: Dtype | None = None,
302
+ ) -> tuple[IntervalSide, IntervalSide, IntervalDtype]:
303
+ """Ensure correctness of input parameters for cls._simple_new."""
304
+ from pandas.core.indexes.base import ensure_index
305
+
306
+ left = ensure_index(left, copy=copy)
307
+ left = maybe_upcast_numeric_to_64bit(left)
308
+
309
+ right = ensure_index(right, copy=copy)
310
+ right = maybe_upcast_numeric_to_64bit(right)
311
+
312
+ if closed is None and isinstance(dtype, IntervalDtype):
313
+ closed = dtype.closed
314
+
315
+ closed = closed or "right"
316
+
317
+ if dtype is not None:
318
+ # GH 19262: dtype must be an IntervalDtype to override inferred
319
+ dtype = pandas_dtype(dtype)
320
+ if isinstance(dtype, IntervalDtype):
321
+ if dtype.subtype is not None:
322
+ left = left.astype(dtype.subtype)
323
+ right = right.astype(dtype.subtype)
324
+ else:
325
+ msg = f"dtype must be an IntervalDtype, got {dtype}"
326
+ raise TypeError(msg)
327
+
328
+ if dtype.closed is None:
329
+ # possibly loading an old pickle
330
+ dtype = IntervalDtype(dtype.subtype, closed)
331
+ elif closed != dtype.closed:
332
+ raise ValueError("closed keyword does not match dtype.closed")
333
+
334
+ # coerce dtypes to match if needed
335
+ if is_float_dtype(left.dtype) and is_integer_dtype(right.dtype):
336
+ right = right.astype(left.dtype)
337
+ elif is_float_dtype(right.dtype) and is_integer_dtype(left.dtype):
338
+ left = left.astype(right.dtype)
339
+
340
+ if type(left) != type(right):
341
+ msg = (
342
+ f"must not have differing left [{type(left).__name__}] and "
343
+ f"right [{type(right).__name__}] types"
344
+ )
345
+ raise ValueError(msg)
346
+ if isinstance(left.dtype, CategoricalDtype) or is_string_dtype(left.dtype):
347
+ # GH 19016
348
+ msg = (
349
+ "category, object, and string subtypes are not supported "
350
+ "for IntervalArray"
351
+ )
352
+ raise TypeError(msg)
353
+ if isinstance(left, ABCPeriodIndex):
354
+ msg = "Period dtypes are not supported, use a PeriodIndex instead"
355
+ raise ValueError(msg)
356
+ if isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):
357
+ msg = (
358
+ "left and right must have the same time zone, got "
359
+ f"'{left.tz}' and '{right.tz}'"
360
+ )
361
+ raise ValueError(msg)
362
+ elif needs_i8_conversion(left.dtype) and left.unit != right.unit:
363
+ # e.g. m8[s] vs m8[ms], try to cast to a common dtype GH#55714
364
+ left_arr, right_arr = left._data._ensure_matching_resos(right._data)
365
+ left = ensure_index(left_arr)
366
+ right = ensure_index(right_arr)
367
+
368
+ # For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray
369
+ left = ensure_wrapped_if_datetimelike(left)
370
+ left = extract_array(left, extract_numpy=True)
371
+ right = ensure_wrapped_if_datetimelike(right)
372
+ right = extract_array(right, extract_numpy=True)
373
+
374
+ if isinstance(left, ArrowExtensionArray) or isinstance(
375
+ right, ArrowExtensionArray
376
+ ):
377
+ pass
378
+ else:
379
+ lbase = getattr(left, "_ndarray", left)
380
+ lbase = getattr(lbase, "_data", lbase).base
381
+ rbase = getattr(right, "_ndarray", right)
382
+ rbase = getattr(rbase, "_data", rbase).base
383
+ if lbase is not None and lbase is rbase:
384
+ # If these share data, then setitem could corrupt our IA
385
+ right = right.copy()
386
+
387
+ dtype = IntervalDtype(left.dtype, closed=closed)
388
+
389
+ return left, right, dtype
390
+
391
+ @classmethod
392
+ def _from_sequence(
393
+ cls,
394
+ scalars,
395
+ *,
396
+ dtype: Dtype | None = None,
397
+ copy: bool = False,
398
+ ) -> Self:
399
+ return cls(scalars, dtype=dtype, copy=copy)
400
+
401
+ @classmethod
402
+ def _from_factorized(cls, values: np.ndarray, original: IntervalArray) -> Self:
403
+ return cls._from_sequence(values, dtype=original.dtype)
404
+
405
+ _interval_shared_docs["from_breaks"] = textwrap.dedent(
406
+ """
407
+ Construct an %(klass)s from an array of splits.
408
+
409
+ Parameters
410
+ ----------
411
+ breaks : array-like (1-dimensional)
412
+ Left and right bounds for each interval.
413
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
414
+ Whether the intervals are closed on the left-side, right-side, both
415
+ or neither.\
416
+ %(name)s
417
+ copy : bool, default False
418
+ Copy the data.
419
+ dtype : dtype or None, default None
420
+ If None, dtype will be inferred.
421
+
422
+ Returns
423
+ -------
424
+ %(klass)s
425
+
426
+ See Also
427
+ --------
428
+ interval_range : Function to create a fixed frequency IntervalIndex.
429
+ %(klass)s.from_arrays : Construct from a left and right array.
430
+ %(klass)s.from_tuples : Construct from a sequence of tuples.
431
+
432
+ %(examples)s\
433
+ """
434
+ )
435
+
436
+ @classmethod
437
+ @Appender(
438
+ _interval_shared_docs["from_breaks"]
439
+ % {
440
+ "klass": "IntervalArray",
441
+ "name": "",
442
+ "examples": textwrap.dedent(
443
+ """\
444
+ Examples
445
+ --------
446
+ >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
447
+ <IntervalArray>
448
+ [(0, 1], (1, 2], (2, 3]]
449
+ Length: 3, dtype: interval[int64, right]
450
+ """
451
+ ),
452
+ }
453
+ )
454
+ def from_breaks(
455
+ cls,
456
+ breaks,
457
+ closed: IntervalClosedType | None = "right",
458
+ copy: bool = False,
459
+ dtype: Dtype | None = None,
460
+ ) -> Self:
461
+ breaks = _maybe_convert_platform_interval(breaks)
462
+
463
+ return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
464
+
465
+ _interval_shared_docs["from_arrays"] = textwrap.dedent(
466
+ """
467
+ Construct from two arrays defining the left and right bounds.
468
+
469
+ Parameters
470
+ ----------
471
+ left : array-like (1-dimensional)
472
+ Left bounds for each interval.
473
+ right : array-like (1-dimensional)
474
+ Right bounds for each interval.
475
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
476
+ Whether the intervals are closed on the left-side, right-side, both
477
+ or neither.\
478
+ %(name)s
479
+ copy : bool, default False
480
+ Copy the data.
481
+ dtype : dtype, optional
482
+ If None, dtype will be inferred.
483
+
484
+ Returns
485
+ -------
486
+ %(klass)s
487
+
488
+ Raises
489
+ ------
490
+ ValueError
491
+ When a value is missing in only one of `left` or `right`.
492
+ When a value in `left` is greater than the corresponding value
493
+ in `right`.
494
+
495
+ See Also
496
+ --------
497
+ interval_range : Function to create a fixed frequency IntervalIndex.
498
+ %(klass)s.from_breaks : Construct an %(klass)s from an array of
499
+ splits.
500
+ %(klass)s.from_tuples : Construct an %(klass)s from an
501
+ array-like of tuples.
502
+
503
+ Notes
504
+ -----
505
+ Each element of `left` must be less than or equal to the `right`
506
+ element at the same position. If an element is missing, it must be
507
+ missing in both `left` and `right`. A TypeError is raised when
508
+ using an unsupported type for `left` or `right`. At the moment,
509
+ 'category', 'object', and 'string' subtypes are not supported.
510
+
511
+ %(examples)s\
512
+ """
513
+ )
514
+
515
+ @classmethod
516
+ @Appender(
517
+ _interval_shared_docs["from_arrays"]
518
+ % {
519
+ "klass": "IntervalArray",
520
+ "name": "",
521
+ "examples": textwrap.dedent(
522
+ """\
523
+ Examples
524
+ --------
525
+ >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
526
+ <IntervalArray>
527
+ [(0, 1], (1, 2], (2, 3]]
528
+ Length: 3, dtype: interval[int64, right]
529
+ """
530
+ ),
531
+ }
532
+ )
533
+ def from_arrays(
534
+ cls,
535
+ left,
536
+ right,
537
+ closed: IntervalClosedType | None = "right",
538
+ copy: bool = False,
539
+ dtype: Dtype | None = None,
540
+ ) -> Self:
541
+ left = _maybe_convert_platform_interval(left)
542
+ right = _maybe_convert_platform_interval(right)
543
+
544
+ left, right, dtype = cls._ensure_simple_new_inputs(
545
+ left,
546
+ right,
547
+ closed=closed,
548
+ copy=copy,
549
+ dtype=dtype,
550
+ )
551
+ cls._validate(left, right, dtype=dtype)
552
+
553
+ return cls._simple_new(left, right, dtype=dtype)
554
+
555
+ _interval_shared_docs["from_tuples"] = textwrap.dedent(
556
+ """
557
+ Construct an %(klass)s from an array-like of tuples.
558
+
559
+ Parameters
560
+ ----------
561
+ data : array-like (1-dimensional)
562
+ Array of tuples.
563
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
564
+ Whether the intervals are closed on the left-side, right-side, both
565
+ or neither.\
566
+ %(name)s
567
+ copy : bool, default False
568
+ Copy the data; this argument is accepted for compatibility only and is ignored.
569
+ dtype : dtype or None, default None
570
+ If None, dtype will be inferred.
571
+
572
+ Returns
573
+ -------
574
+ %(klass)s
575
+
576
+ See Also
577
+ --------
578
+ interval_range : Function to create a fixed frequency IntervalIndex.
579
+ %(klass)s.from_arrays : Construct an %(klass)s from a left and
580
+ right array.
581
+ %(klass)s.from_breaks : Construct an %(klass)s from an array of
582
+ splits.
583
+
584
+ %(examples)s\
585
+ """
586
+ )
587
+
588
+ @classmethod
589
+ @Appender(
590
+ _interval_shared_docs["from_tuples"]
591
+ % {
592
+ "klass": "IntervalArray",
593
+ "name": "",
594
+ "examples": textwrap.dedent(
595
+ """\
596
+ Examples
597
+ --------
598
+ >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
599
+ <IntervalArray>
600
+ [(0, 1], (1, 2]]
601
+ Length: 2, dtype: interval[int64, right]
602
+ """
603
+ ),
604
+ }
605
+ )
606
+ def from_tuples(
607
+ cls,
608
+ data,
609
+ closed: IntervalClosedType | None = "right",
610
+ copy: bool = False,
611
+ dtype: Dtype | None = None,
612
+ ) -> Self:
613
+ if len(data):
614
+ left, right = [], []
615
+ else:
616
+ # ensure that empty data keeps input dtype
617
+ left = right = data
618
+
619
+ for d in data:
620
+ if not isinstance(d, tuple) and isna(d):
621
+ lhs = rhs = np.nan
622
+ else:
623
+ name = cls.__name__
624
+ try:
625
+ # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
626
+ lhs, rhs = d
627
+ except ValueError as err:
628
+ msg = f"{name}.from_tuples requires tuples of length 2, got {d}"
629
+ raise ValueError(msg) from err
630
+ except TypeError as err:
631
+ msg = f"{name}.from_tuples received an invalid item, {d}"
632
+ raise TypeError(msg) from err
633
+ left.append(lhs)
634
+ right.append(rhs)
635
+
636
+ return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)
637
+
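# Illustrative construction sketch using the classmethods above; all three calls
# produce the same right-closed intervals.
import pandas as pd

pd.arrays.IntervalArray.from_breaks([0, 1, 2])
pd.arrays.IntervalArray.from_arrays([0, 1], [1, 2])
pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])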
638
+ @classmethod
639
+ def _validate(cls, left, right, dtype: IntervalDtype) -> None:
640
+ """
641
+ Verify that the IntervalArray is valid.
642
+
643
+ Checks that
644
+
645
+ * dtype is correct
646
+ * left and right match lengths
647
+ * left and right have the same missing values
648
+ * left is always below right
649
+ """
650
+ if not isinstance(dtype, IntervalDtype):
651
+ msg = f"invalid dtype: {dtype}"
652
+ raise ValueError(msg)
653
+ if len(left) != len(right):
654
+ msg = "left and right must have the same length"
655
+ raise ValueError(msg)
656
+ left_mask = notna(left)
657
+ right_mask = notna(right)
658
+ if not (left_mask == right_mask).all():
659
+ msg = (
660
+ "missing values must be missing in the same "
661
+ "location both left and right sides"
662
+ )
663
+ raise ValueError(msg)
664
+ if not (left[left_mask] <= right[left_mask]).all():
665
+ msg = "left side of interval must be <= right side"
666
+ raise ValueError(msg)
667
+
668
+ def _shallow_copy(self, left, right) -> Self:
669
+ """
670
+ Return a new IntervalArray with the replacement attributes
671
+
672
+ Parameters
673
+ ----------
674
+ left : Index
675
+ Values to be used for the left-side of the intervals.
676
+ right : Index
677
+ Values to be used for the right-side of the intervals.
678
+ """
679
+ dtype = IntervalDtype(left.dtype, closed=self.closed)
680
+ left, right, dtype = self._ensure_simple_new_inputs(left, right, dtype=dtype)
681
+
682
+ return self._simple_new(left, right, dtype=dtype)
683
+
684
+ # ---------------------------------------------------------------------
685
+ # Descriptive
686
+
687
+ @property
688
+ def dtype(self) -> IntervalDtype:
689
+ return self._dtype
690
+
691
+ @property
692
+ def nbytes(self) -> int:
693
+ return self.left.nbytes + self.right.nbytes
694
+
695
+ @property
696
+ def size(self) -> int:
697
+ # Avoid materializing self.values
698
+ return self.left.size
699
+
700
+ # ---------------------------------------------------------------------
701
+ # EA Interface
702
+
703
+ def __iter__(self) -> Iterator:
704
+ return iter(np.asarray(self))
705
+
706
+ def __len__(self) -> int:
707
+ return len(self._left)
708
+
709
+ @overload
710
+ def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA:
711
+ ...
712
+
713
+ @overload
714
+ def __getitem__(self, key: SequenceIndexer) -> Self:
715
+ ...
716
+
717
+ def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA:
718
+ key = check_array_indexer(self, key)
719
+ left = self._left[key]
720
+ right = self._right[key]
721
+
722
+ if not isinstance(left, (np.ndarray, ExtensionArray)):
723
+ # scalar
724
+ if is_scalar(left) and isna(left):
725
+ return self._fill_value
726
+ return Interval(left, right, self.closed)
727
+ if np.ndim(left) > 1:
728
+ # GH#30588 multi-dimensional indexer disallowed
729
+ raise ValueError("multi-dimensional indexing not allowed")
730
+ # Argument 2 to "_simple_new" of "IntervalArray" has incompatible type
731
+ # "Union[Period, Timestamp, Timedelta, NaTType, DatetimeArray, TimedeltaArray,
732
+ # ndarray[Any, Any]]"; expected "Union[Union[DatetimeArray, TimedeltaArray],
733
+ # ndarray[Any, Any]]"
734
+ return self._simple_new(left, right, dtype=self.dtype) # type: ignore[arg-type]
735
+
736
+ def __setitem__(self, key, value) -> None:
737
+ value_left, value_right = self._validate_setitem_value(value)
738
+ key = check_array_indexer(self, key)
739
+
740
+ self._left[key] = value_left
741
+ self._right[key] = value_right
742
+
743
+ def _cmp_method(self, other, op):
744
+ # ensure pandas array for list-like and eliminate non-interval scalars
745
+ if is_list_like(other):
746
+ if len(self) != len(other):
747
+ raise ValueError("Lengths must match to compare")
748
+ other = pd_array(other)
749
+ elif not isinstance(other, Interval):
750
+ # non-interval scalar -> no matches
751
+ if other is NA:
752
+ # GH#31882
753
+ from pandas.core.arrays import BooleanArray
754
+
755
+ arr = np.empty(self.shape, dtype=bool)
756
+ mask = np.ones(self.shape, dtype=bool)
757
+ return BooleanArray(arr, mask)
758
+ return invalid_comparison(self, other, op)
759
+
760
+ # determine the dtype of the elements we want to compare
761
+ if isinstance(other, Interval):
762
+ other_dtype = pandas_dtype("interval")
763
+ elif not isinstance(other.dtype, CategoricalDtype):
764
+ other_dtype = other.dtype
765
+ else:
766
+ # for categorical defer to categories for dtype
767
+ other_dtype = other.categories.dtype
768
+
769
+ # extract intervals if we have interval categories with matching closed
770
+ if isinstance(other_dtype, IntervalDtype):
771
+ if self.closed != other.categories.closed:
772
+ return invalid_comparison(self, other, op)
773
+
774
+ other = other.categories._values.take(
775
+ other.codes, allow_fill=True, fill_value=other.categories._na_value
776
+ )
777
+
778
+ # interval-like -> need same closed and matching endpoints
779
+ if isinstance(other_dtype, IntervalDtype):
780
+ if self.closed != other.closed:
781
+ return invalid_comparison(self, other, op)
782
+ elif not isinstance(other, Interval):
783
+ other = type(self)(other)
784
+
785
+ if op is operator.eq:
786
+ return (self._left == other.left) & (self._right == other.right)
787
+ elif op is operator.ne:
788
+ return (self._left != other.left) | (self._right != other.right)
789
+ elif op is operator.gt:
790
+ return (self._left > other.left) | (
791
+ (self._left == other.left) & (self._right > other.right)
792
+ )
793
+ elif op is operator.ge:
794
+ return (self == other) | (self > other)
795
+ elif op is operator.lt:
796
+ return (self._left < other.left) | (
797
+ (self._left == other.left) & (self._right < other.right)
798
+ )
799
+ else:
800
+ # operator.le
801
+ return (self == other) | (self < other)
802
+
803
+ # non-interval/non-object dtype -> no matches
804
+ if not is_object_dtype(other_dtype):
805
+ return invalid_comparison(self, other, op)
806
+
807
+ # object dtype -> iteratively check for intervals
808
+ result = np.zeros(len(self), dtype=bool)
809
+ for i, obj in enumerate(other):
810
+ try:
811
+ result[i] = op(self[i], obj)
812
+ except TypeError:
813
+ if obj is NA:
814
+ # comparison with np.nan returns NA
815
+ # github.com/pandas-dev/pandas/pull/37124#discussion_r509095092
816
+ result = result.astype(object)
817
+ result[i] = NA
818
+ else:
819
+ raise
820
+ return result
821
+
822
+ @unpack_zerodim_and_defer("__eq__")
823
+ def __eq__(self, other):
824
+ return self._cmp_method(other, operator.eq)
825
+
826
+ @unpack_zerodim_and_defer("__ne__")
827
+ def __ne__(self, other):
828
+ return self._cmp_method(other, operator.ne)
829
+
830
+ @unpack_zerodim_and_defer("__gt__")
831
+ def __gt__(self, other):
832
+ return self._cmp_method(other, operator.gt)
833
+
834
+ @unpack_zerodim_and_defer("__ge__")
835
+ def __ge__(self, other):
836
+ return self._cmp_method(other, operator.ge)
837
+
838
+ @unpack_zerodim_and_defer("__lt__")
839
+ def __lt__(self, other):
840
+ return self._cmp_method(other, operator.lt)
841
+
842
+ @unpack_zerodim_and_defer("__le__")
843
+ def __le__(self, other):
844
+ return self._cmp_method(other, operator.le)
845
+
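# Illustrative comparison sketch: equality requires matching left, right and
# closed; a non-interval scalar yields no matches (all False for ==).
import pandas as pd

arr = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
print(arr == pd.Interval(0, 1))  # [ True False]
print(arr == 0)                  # [False False]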
846
+ def argsort(
847
+ self,
848
+ *,
849
+ ascending: bool = True,
850
+ kind: SortKind = "quicksort",
851
+ na_position: str = "last",
852
+ **kwargs,
853
+ ) -> np.ndarray:
854
+ ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)
855
+
856
+ if ascending and kind == "quicksort" and na_position == "last":
857
+ # TODO: in an IntervalIndex we can reuse the cached
858
+ # IntervalTree.left_sorter
859
+ return np.lexsort((self.right, self.left))
860
+
861
+ # TODO: other cases we can use lexsort for? much more performant.
862
+ return super().argsort(
863
+ ascending=ascending, kind=kind, na_position=na_position, **kwargs
864
+ )
865
+
866
+ def min(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA:
867
+ nv.validate_minmax_axis(axis, self.ndim)
868
+
869
+ if not len(self):
870
+ return self._na_value
871
+
872
+ mask = self.isna()
873
+ if mask.any():
874
+ if not skipna:
875
+ return self._na_value
876
+ obj = self[~mask]
877
+ else:
878
+ obj = self
879
+
880
+ indexer = obj.argsort()[0]
881
+ return obj[indexer]
882
+
883
+ def max(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA:
884
+ nv.validate_minmax_axis(axis, self.ndim)
885
+
886
+ if not len(self):
887
+ return self._na_value
888
+
889
+ mask = self.isna()
890
+ if mask.any():
891
+ if not skipna:
892
+ return self._na_value
893
+ obj = self[~mask]
894
+ else:
895
+ obj = self
896
+
897
+ indexer = obj.argsort()[-1]
898
+ return obj[indexer]
899
+
900
+ def _pad_or_backfill( # pylint: disable=useless-parent-delegation
901
+ self,
902
+ *,
903
+ method: FillnaOptions,
904
+ limit: int | None = None,
905
+ limit_area: Literal["inside", "outside"] | None = None,
906
+ copy: bool = True,
907
+ ) -> Self:
908
+ # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove
909
+ # this method entirely.
910
+ return super()._pad_or_backfill(
911
+ method=method, limit=limit, limit_area=limit_area, copy=copy
912
+ )
913
+
914
+ def fillna(
915
+ self, value=None, method=None, limit: int | None = None, copy: bool = True
916
+ ) -> Self:
917
+ """
918
+ Fill NA/NaN values using the specified method.
919
+
920
+ Parameters
921
+ ----------
922
+ value : scalar, dict, Series
923
+ If a scalar value is passed it is used to fill all missing values.
924
+ Alternatively, a Series or dict can be used to fill in different
925
+ values for each index. The value should not be a list. The
926
+ value(s) passed should be either Interval objects or NA/NaN.
927
+ method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
928
+ (Not implemented yet for IntervalArray)
929
+ Method to use for filling holes in reindexed Series
930
+ limit : int, default None
931
+ (Not implemented yet for IntervalArray)
932
+ If method is specified, this is the maximum number of consecutive
933
+ NaN values to forward/backward fill. In other words, if there is
934
+ a gap with more than this number of consecutive NaNs, it will only
935
+ be partially filled. If method is not specified, this is the
936
+ maximum number of entries along the entire axis where NaNs will be
937
+ filled.
938
+ copy : bool, default True
939
+ Whether to make a copy of the data before filling. If False, then
940
+ the original should be modified and no new memory should be allocated.
941
+ For ExtensionArray subclasses that cannot do this, it is at the
942
+ author's discretion whether to ignore "copy=False" or to raise.
943
+
944
+ Returns
945
+ -------
946
+ filled : IntervalArray with NA/NaN filled
947
+ """
948
+ if copy is False:
949
+ raise NotImplementedError
950
+ if method is not None:
951
+ return super().fillna(value=value, method=method, limit=limit)
952
+
953
+ value_left, value_right = self._validate_scalar(value)
954
+
955
+ left = self.left.fillna(value=value_left)
956
+ right = self.right.fillna(value=value_right)
957
+ return self._shallow_copy(left, right)
958
+
959
+ def astype(self, dtype, copy: bool = True):
960
+ """
961
+ Cast to an ExtensionArray or NumPy array with dtype 'dtype'.
962
+
963
+ Parameters
964
+ ----------
965
+ dtype : str or dtype
966
+ Typecode or data-type to which the array is cast.
967
+
968
+ copy : bool, default True
969
+ Whether to copy the data, even if not necessary. If False,
970
+ a copy is made only if the old dtype does not match the
971
+ new dtype.
972
+
973
+ Returns
974
+ -------
975
+ array : ExtensionArray or ndarray
976
+ ExtensionArray or NumPy ndarray with 'dtype' for its dtype.
977
+ """
978
+ from pandas import Index
979
+
980
+ if dtype is not None:
981
+ dtype = pandas_dtype(dtype)
982
+
983
+ if isinstance(dtype, IntervalDtype):
984
+ if dtype == self.dtype:
985
+ return self.copy() if copy else self
986
+
987
+ if is_float_dtype(self.dtype.subtype) and needs_i8_conversion(
988
+ dtype.subtype
989
+ ):
990
+ # This is allowed on the Index.astype but we disallow it here
991
+ msg = (
992
+ f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
993
+ )
994
+ raise TypeError(msg)
995
+
996
+ # need to cast to different subtype
997
+ try:
998
+ # We need to use Index rules for astype to prevent casting
999
+ # np.nan entries to int subtypes
1000
+ new_left = Index(self._left, copy=False).astype(dtype.subtype)
1001
+ new_right = Index(self._right, copy=False).astype(dtype.subtype)
1002
+ except IntCastingNaNError:
1003
+ # e.g test_subtype_integer
1004
+ raise
1005
+ except (TypeError, ValueError) as err:
1006
+ # e.g. test_subtype_integer_errors f8->u8 can be lossy
1007
+ # and raises ValueError
1008
+ msg = (
1009
+ f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
1010
+ )
1011
+ raise TypeError(msg) from err
1012
+ return self._shallow_copy(new_left, new_right)
1013
+ else:
1014
+ try:
1015
+ return super().astype(dtype, copy=copy)
1016
+ except (TypeError, ValueError) as err:
1017
+ msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
1018
+ raise TypeError(msg) from err
1019
+
1020
+ def equals(self, other) -> bool:
1021
+ if type(self) != type(other):
1022
+ return False
1023
+
1024
+ return bool(
1025
+ self.closed == other.closed
1026
+ and self.left.equals(other.left)
1027
+ and self.right.equals(other.right)
1028
+ )
1029
+
1030
+ @classmethod
1031
+ def _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self:
1032
+ """
1033
+ Concatenate multiple IntervalArray
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ to_concat : sequence of IntervalArray
1038
+
1039
+ Returns
1040
+ -------
1041
+ IntervalArray
1042
+ """
1043
+ closed_set = {interval.closed for interval in to_concat}
1044
+ if len(closed_set) != 1:
1045
+ raise ValueError("Intervals must all be closed on the same side.")
1046
+ closed = closed_set.pop()
1047
+
1048
+ left: IntervalSide = np.concatenate([interval.left for interval in to_concat])
1049
+ right: IntervalSide = np.concatenate([interval.right for interval in to_concat])
1050
+
1051
+ left, right, dtype = cls._ensure_simple_new_inputs(left, right, closed=closed)
1052
+
1053
+ return cls._simple_new(left, right, dtype=dtype)
1054
+
1055
+ def copy(self) -> Self:
1056
+ """
1057
+ Return a copy of the array.
1058
+
1059
+ Returns
1060
+ -------
1061
+ IntervalArray
1062
+ """
1063
+ left = self._left.copy()
1064
+ right = self._right.copy()
1065
+ dtype = self.dtype
1066
+ return self._simple_new(left, right, dtype=dtype)
1067
+
1068
+ def isna(self) -> np.ndarray:
1069
+ return isna(self._left)
1070
+
1071
+ def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray:
1072
+ if not len(self) or periods == 0:
1073
+ return self.copy()
1074
+
1075
+ self._validate_scalar(fill_value)
1076
+
1077
+ # ExtensionArray.shift doesn't work for two reasons
1078
+ # 1. IntervalArray.dtype.na_value may not be correct for the dtype.
1079
+ # 2. IntervalArray._from_sequence only accepts NaN for missing values,
1080
+ # not other values like NaT
1081
+
1082
+ empty_len = min(abs(periods), len(self))
1083
+ if isna(fill_value):
1084
+ from pandas import Index
1085
+
1086
+ fill_value = Index(self._left, copy=False)._na_value
1087
+ empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))
1088
+ else:
1089
+ empty = self._from_sequence([fill_value] * empty_len, dtype=self.dtype)
1090
+
1091
+ if periods > 0:
1092
+ a = empty
1093
+ b = self[:-periods]
1094
+ else:
1095
+ a = self[abs(periods) :]
1096
+ b = empty
1097
+ return self._concat_same_type([a, b])
1098
+
1099
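An illustrative sketch of the shift logic above, which builds a short block of fill values and concatenates it with a slice of the array:

import pandas as pd

arr = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), (2, 3)])
arr.shift(1)                                  # leading slot becomes NaN, the rest move right
arr.shift(-1, fill_value=pd.Interval(9, 10))  # trailing slot filled with the given Interval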
+ def take(
1100
+ self,
1101
+ indices,
1102
+ *,
1103
+ allow_fill: bool = False,
1104
+ fill_value=None,
1105
+ axis=None,
1106
+ **kwargs,
1107
+ ) -> Self:
1108
+ """
1109
+ Take elements from the IntervalArray.
1110
+
1111
+ Parameters
1112
+ ----------
1113
+ indices : sequence of integers
1114
+ Indices to be taken.
1115
+
1116
+ allow_fill : bool, default False
1117
+ How to handle negative values in `indices`.
1118
+
1119
+ * False: negative values in `indices` indicate positional indices
1120
+ from the right (the default). This is similar to
1121
+ :func:`numpy.take`.
1122
+
1123
+ * True: negative values in `indices` indicate
1124
+ missing values. These values are set to `fill_value`. Any other
1125
+ negative values raise a ``ValueError``.
1126
+
1127
+ fill_value : Interval or NA, optional
1128
+ Fill value to use for NA-indices when `allow_fill` is True.
1129
+ This may be ``None``, in which case the default NA value for
1130
+ the type, ``self.dtype.na_value``, is used.
1131
+
1132
+ For many ExtensionArrays, there will be two representations of
1133
+ `fill_value`: a user-facing "boxed" scalar, and a low-level
1134
+ physical NA value. `fill_value` should be the user-facing version,
1135
+ and the implementation should handle translating that to the
1136
+ physical version for processing the take if necessary.
1137
+
1138
+ axis : any, default None
1139
+ Present for compat with IntervalIndex; does nothing.
1140
+
1141
+ Returns
1142
+ -------
1143
+ IntervalArray
1144
+
1145
+ Raises
1146
+ ------
1147
+ IndexError
1148
+ When the indices are out of bounds for the array.
1149
+ ValueError
1150
+ When `indices` contains negative values other than ``-1``
1151
+ and `allow_fill` is True.
1152
+ """
1153
+ nv.validate_take((), kwargs)
1154
+
1155
+ fill_left = fill_right = fill_value
1156
+ if allow_fill:
1157
+ fill_left, fill_right = self._validate_scalar(fill_value)
1158
+
1159
+ left_take = take(
1160
+ self._left, indices, allow_fill=allow_fill, fill_value=fill_left
1161
+ )
1162
+ right_take = take(
1163
+ self._right, indices, allow_fill=allow_fill, fill_value=fill_right
1164
+ )
1165
+
1166
+ return self._shallow_copy(left_take, right_take)
1167
+
1168
+ def _validate_listlike(self, value):
1169
+ # list-like of intervals
1170
+ try:
1171
+ array = IntervalArray(value)
1172
+ self._check_closed_matches(array, name="value")
1173
+ value_left, value_right = array.left, array.right
1174
+ except TypeError as err:
1175
+ # wrong type: not interval or NA
1176
+ msg = f"'value' should be an interval type, got {type(value)} instead."
1177
+ raise TypeError(msg) from err
1178
+
1179
+ try:
1180
+ self.left._validate_fill_value(value_left)
1181
+ except (LossySetitemError, TypeError) as err:
1182
+ msg = (
1183
+ "'value' should be a compatible interval type, "
1184
+ f"got {type(value)} instead."
1185
+ )
1186
+ raise TypeError(msg) from err
1187
+
1188
+ return value_left, value_right
1189
+
1190
+ def _validate_scalar(self, value):
1191
+ if isinstance(value, Interval):
1192
+ self._check_closed_matches(value, name="value")
1193
+ left, right = value.left, value.right
1194
+ # TODO: check subdtype match like _validate_setitem_value?
1195
+ elif is_valid_na_for_dtype(value, self.left.dtype):
1196
+ # GH#18295
1197
+ left = right = self.left._na_value
1198
+ else:
1199
+ raise TypeError(
1200
+ "can only insert Interval objects and NA into an IntervalArray"
1201
+ )
1202
+ return left, right
1203
+
1204
+ def _validate_setitem_value(self, value):
1205
+ if is_valid_na_for_dtype(value, self.left.dtype):
1206
+ # na value: need special casing to set directly on numpy arrays
1207
+ value = self.left._na_value
1208
+ if is_integer_dtype(self.dtype.subtype):
1209
+ # can't set NaN on a numpy integer array
1210
+ # GH#45484 TypeError, not ValueError, matches what we get with
1211
+ # non-NA un-holdable value.
1212
+ raise TypeError("Cannot set float NaN to integer-backed IntervalArray")
1213
+ value_left, value_right = value, value
1214
+
1215
+ elif isinstance(value, Interval):
1216
+ # scalar interval
1217
+ self._check_closed_matches(value, name="value")
1218
+ value_left, value_right = value.left, value.right
1219
+ self.left._validate_fill_value(value_left)
1220
+ self.left._validate_fill_value(value_right)
1221
+
1222
+ else:
1223
+ return self._validate_listlike(value)
1224
+
1225
+ return value_left, value_right
1226
+
1227
+ def value_counts(self, dropna: bool = True) -> Series:
1228
+ """
1229
+ Returns a Series containing counts of each interval.
1230
+
1231
+ Parameters
1232
+ ----------
1233
+ dropna : bool, default True
1234
+ Don't include counts of NaN.
1235
+
1236
+ Returns
1237
+ -------
1238
+ counts : Series
1239
+
1240
+ See Also
1241
+ --------
1242
+ Series.value_counts
1243
+ """
1244
+ # TODO: implement this in a non-naive way!
1245
+ with warnings.catch_warnings():
1246
+ warnings.filterwarnings(
1247
+ "ignore",
1248
+ "The behavior of value_counts with object-dtype is deprecated",
1249
+ category=FutureWarning,
1250
+ )
1251
+ result = value_counts(np.asarray(self), dropna=dropna)
1252
+ # Once the deprecation is enforced, we will need to do
1253
+ # `result.index = result.index.astype(self.dtype)`
1254
+ return result
1255
+
1256
+ # ---------------------------------------------------------------------
1257
+ # Rendering Methods
1258
+
1259
+ def _formatter(self, boxed: bool = False):
1260
+ # returning 'str' here causes us to render as e.g. "(0, 1]" instead of
1261
+ # "Interval(0, 1, closed='right')"
1262
+ return str
1263
+
1264
+ # ---------------------------------------------------------------------
1265
+ # Vectorized Interval Properties/Attributes
1266
+
1267
+ @property
1268
+ def left(self) -> Index:
1269
+ """
1270
+ Return the left endpoints of each Interval in the IntervalArray as an Index.
1271
+
1272
+ Examples
1273
+ --------
1274
+
1275
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
1276
+ >>> interv_arr
1277
+ <IntervalArray>
1278
+ [(0, 1], (2, 5]]
1279
+ Length: 2, dtype: interval[int64, right]
1280
+ >>> interv_arr.left
1281
+ Index([0, 2], dtype='int64')
1282
+ """
1283
+ from pandas import Index
1284
+
1285
+ return Index(self._left, copy=False)
1286
+
1287
+ @property
1288
+ def right(self) -> Index:
1289
+ """
1290
+ Return the right endpoints of each Interval in the IntervalArray as an Index.
1291
+
1292
+ Examples
1293
+ --------
1294
+
1295
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
1296
+ >>> interv_arr
1297
+ <IntervalArray>
1298
+ [(0, 1], (2, 5]]
1299
+ Length: 2, dtype: interval[int64, right]
1300
+ >>> interv_arr.right
1301
+ Index([1, 5], dtype='int64')
1302
+ """
1303
+ from pandas import Index
1304
+
1305
+ return Index(self._right, copy=False)
1306
+
1307
+ @property
1308
+ def length(self) -> Index:
1309
+ """
1310
+ Return an Index with entries denoting the length of each Interval.
1311
+
1312
+ Examples
1313
+ --------
1314
+
1315
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
1316
+ >>> interv_arr
1317
+ <IntervalArray>
1318
+ [(0, 1], (1, 5]]
1319
+ Length: 2, dtype: interval[int64, right]
1320
+ >>> interv_arr.length
1321
+ Index([1, 4], dtype='int64')
1322
+ """
1323
+ return self.right - self.left
1324
+
1325
+ @property
1326
+ def mid(self) -> Index:
1327
+ """
1328
+ Return the midpoint of each Interval in the IntervalArray as an Index.
1329
+
1330
+ Examples
1331
+ --------
1332
+
1333
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
1334
+ >>> interv_arr
1335
+ <IntervalArray>
1336
+ [(0, 1], (1, 5]]
1337
+ Length: 2, dtype: interval[int64, right]
1338
+ >>> interv_arr.mid
1339
+ Index([0.5, 3.0], dtype='float64')
1340
+ """
1341
+ try:
1342
+ return 0.5 * (self.left + self.right)
1343
+ except TypeError:
1344
+ # datetime safe version
1345
+ return self.left + 0.5 * self.length
1346
+
1347
+ _interval_shared_docs["overlaps"] = textwrap.dedent(
1348
+ """
1349
+ Check elementwise if an Interval overlaps the values in the %(klass)s.
1350
+
1351
+ Two intervals overlap if they share a common point, including closed
1352
+ endpoints. Intervals that only have an open endpoint in common do not
1353
+ overlap.
1354
+
1355
+ Parameters
1356
+ ----------
1357
+ other : %(klass)s
1358
+ Interval to check against for an overlap.
1359
+
1360
+ Returns
1361
+ -------
1362
+ ndarray
1363
+ Boolean array positionally indicating where an overlap occurs.
1364
+
1365
+ See Also
1366
+ --------
1367
+ Interval.overlaps : Check whether two Interval objects overlap.
1368
+
1369
+ Examples
1370
+ --------
1371
+ %(examples)s
1372
+ >>> intervals.overlaps(pd.Interval(0.5, 1.5))
1373
+ array([ True, True, False])
1374
+
1375
+ Intervals that share closed endpoints overlap:
1376
+
1377
+ >>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
1378
+ array([ True, True, True])
1379
+
1380
+ Intervals that only have an open endpoint in common do not overlap:
1381
+
1382
+ >>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
1383
+ array([False, True, False])
1384
+ """
1385
+ )
1386
+
1387
+ @Appender(
1388
+ _interval_shared_docs["overlaps"]
1389
+ % {
1390
+ "klass": "IntervalArray",
1391
+ "examples": textwrap.dedent(
1392
+ """\
1393
+ >>> data = [(0, 1), (1, 3), (2, 4)]
1394
+ >>> intervals = pd.arrays.IntervalArray.from_tuples(data)
1395
+ >>> intervals
1396
+ <IntervalArray>
1397
+ [(0, 1], (1, 3], (2, 4]]
1398
+ Length: 3, dtype: interval[int64, right]
1399
+ """
1400
+ ),
1401
+ }
1402
+ )
1403
+ def overlaps(self, other):
1404
+ if isinstance(other, (IntervalArray, ABCIntervalIndex)):
1405
+ raise NotImplementedError
1406
+ if not isinstance(other, Interval):
1407
+ msg = f"`other` must be Interval-like, got {type(other).__name__}"
1408
+ raise TypeError(msg)
1409
+
1410
+ # equality is okay if both endpoints are closed (overlap at a point)
1411
+ op1 = le if (self.closed_left and other.closed_right) else lt
1412
+ op2 = le if (other.closed_left and self.closed_right) else lt
1413
+
1414
+ # overlaps is equivalent to the negation of two intervals being disjoint:
1415
+ # disjoint = (A.left > B.right) or (B.left > A.right)
1416
+ # (simplifying the negation allows this to be done in fewer operations)
1417
+ return op1(self.left, other.right) & op2(other.left, self.right)
1418
+
1419
+ # ---------------------------------------------------------------------
1420
+
1421
+ @property
1422
+ def closed(self) -> IntervalClosedType:
1423
+ """
1424
+ String describing the inclusive side of the intervals.
1425
+
1426
+ Either ``left``, ``right``, ``both`` or ``neither``.
1427
+
1428
+ Examples
1429
+ --------
1430
+
1431
+ For arrays:
1432
+
1433
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
1434
+ >>> interv_arr
1435
+ <IntervalArray>
1436
+ [(0, 1], (1, 5]]
1437
+ Length: 2, dtype: interval[int64, right]
1438
+ >>> interv_arr.closed
1439
+ 'right'
1440
+
1441
+ For Interval Index:
1442
+
1443
+ >>> interv_idx = pd.interval_range(start=0, end=2)
1444
+ >>> interv_idx
1445
+ IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
1446
+ >>> interv_idx.closed
1447
+ 'right'
1448
+ """
1449
+ return self.dtype.closed
1450
+
1451
+ _interval_shared_docs["set_closed"] = textwrap.dedent(
1452
+ """
1453
+ Return an identical %(klass)s closed on the specified side.
1454
+
1455
+ Parameters
1456
+ ----------
1457
+ closed : {'left', 'right', 'both', 'neither'}
1458
+ Whether the intervals are closed on the left-side, right-side, both
1459
+ or neither.
1460
+
1461
+ Returns
1462
+ -------
1463
+ %(klass)s
1464
+
1465
+ %(examples)s\
1466
+ """
1467
+ )
1468
+
1469
+ @Appender(
1470
+ _interval_shared_docs["set_closed"]
1471
+ % {
1472
+ "klass": "IntervalArray",
1473
+ "examples": textwrap.dedent(
1474
+ """\
1475
+ Examples
1476
+ --------
1477
+ >>> index = pd.arrays.IntervalArray.from_breaks(range(4))
1478
+ >>> index
1479
+ <IntervalArray>
1480
+ [(0, 1], (1, 2], (2, 3]]
1481
+ Length: 3, dtype: interval[int64, right]
1482
+ >>> index.set_closed('both')
1483
+ <IntervalArray>
1484
+ [[0, 1], [1, 2], [2, 3]]
1485
+ Length: 3, dtype: interval[int64, both]
1486
+ """
1487
+ ),
1488
+ }
1489
+ )
1490
+ def set_closed(self, closed: IntervalClosedType) -> Self:
1491
+ if closed not in VALID_CLOSED:
1492
+ msg = f"invalid option for 'closed': {closed}"
1493
+ raise ValueError(msg)
1494
+
1495
+ left, right = self._left, self._right
1496
+ dtype = IntervalDtype(left.dtype, closed=closed)
1497
+ return self._simple_new(left, right, dtype=dtype)
1498
+
1499
+ _interval_shared_docs[
1500
+ "is_non_overlapping_monotonic"
1501
+ ] = """
1502
+ Return a boolean indicating whether the %(klass)s is non-overlapping and monotonic.
1503
+
1504
+ Non-overlapping means no Intervals share points, and monotonic means
1505
+ either monotonic increasing or monotonic decreasing.
1506
+
1507
+ Examples
1508
+ --------
1509
+ For arrays:
1510
+
1511
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
1512
+ >>> interv_arr
1513
+ <IntervalArray>
1514
+ [(0, 1], (1, 5]]
1515
+ Length: 2, dtype: interval[int64, right]
1516
+ >>> interv_arr.is_non_overlapping_monotonic
1517
+ True
1518
+
1519
+ >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
1520
+ ... pd.Interval(-1, 0.1)])
1521
+ >>> interv_arr
1522
+ <IntervalArray>
1523
+ [(0.0, 1.0], (-1.0, 0.1]]
1524
+ Length: 2, dtype: interval[float64, right]
1525
+ >>> interv_arr.is_non_overlapping_monotonic
1526
+ False
1527
+
1528
+ For Interval Index:
1529
+
1530
+ >>> interv_idx = pd.interval_range(start=0, end=2)
1531
+ >>> interv_idx
1532
+ IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
1533
+ >>> interv_idx.is_non_overlapping_monotonic
1534
+ True
1535
+
1536
+ >>> interv_idx = pd.interval_range(start=0, end=2, closed='both')
1537
+ >>> interv_idx
1538
+ IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]')
1539
+ >>> interv_idx.is_non_overlapping_monotonic
1540
+ False
1541
+ """
1542
+
1543
+ @property
1544
+ @Appender(
1545
+ _interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs
1546
+ )
1547
+ def is_non_overlapping_monotonic(self) -> bool:
1548
+ # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
1549
+ # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
1550
+ # we already require left <= right
1551
+
1552
+ # strict inequality for closed == 'both'; equality implies overlapping
1553
+ # at a point when both sides of intervals are included
1554
+ if self.closed == "both":
1555
+ return bool(
1556
+ (self._right[:-1] < self._left[1:]).all()
1557
+ or (self._left[:-1] > self._right[1:]).all()
1558
+ )
1559
+
1560
+ # non-strict inequality when closed != 'both'; at least one side is
1561
+ # not included in the intervals, so equality does not imply overlapping
1562
+ return bool(
1563
+ (self._right[:-1] <= self._left[1:]).all()
1564
+ or (self._left[:-1] >= self._right[1:]).all()
1565
+ )
1566
+
1567
+ # ---------------------------------------------------------------------
1568
+ # Conversion
1569
+
1570
+ def __array__(
1571
+ self, dtype: NpDtype | None = None, copy: bool | None = None
1572
+ ) -> np.ndarray:
1573
+ """
1574
+ Return the IntervalArray's data as a numpy array of Interval
1575
+ objects (with dtype='object')
1576
+ """
1577
+ left = self._left
1578
+ right = self._right
1579
+ mask = self.isna()
1580
+ closed = self.closed
1581
+
1582
+ result = np.empty(len(left), dtype=object)
1583
+ for i, left_value in enumerate(left):
1584
+ if mask[i]:
1585
+ result[i] = np.nan
1586
+ else:
1587
+ result[i] = Interval(left_value, right[i], closed)
1588
+ return result
1589
+
1590
+ def __arrow_array__(self, type=None):
1591
+ """
1592
+ Convert myself into a pyarrow Array.
1593
+ """
1594
+ import pyarrow
1595
+
1596
+ from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
1597
+
1598
+ try:
1599
+ subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
1600
+ except TypeError as err:
1601
+ raise TypeError(
1602
+ f"Conversion to arrow with subtype '{self.dtype.subtype}' "
1603
+ "is not supported"
1604
+ ) from err
1605
+ interval_type = ArrowIntervalType(subtype, self.closed)
1606
+ storage_array = pyarrow.StructArray.from_arrays(
1607
+ [
1608
+ pyarrow.array(self._left, type=subtype, from_pandas=True),
1609
+ pyarrow.array(self._right, type=subtype, from_pandas=True),
1610
+ ],
1611
+ names=["left", "right"],
1612
+ )
1613
+ mask = self.isna()
1614
+ if mask.any():
1615
+ # if there are missing values, set validity bitmap also on the array level
1616
+ null_bitmap = pyarrow.array(~mask).buffers()[1]
1617
+ storage_array = pyarrow.StructArray.from_buffers(
1618
+ storage_array.type,
1619
+ len(storage_array),
1620
+ [null_bitmap],
1621
+ children=[storage_array.field(0), storage_array.field(1)],
1622
+ )
1623
+
1624
+ if type is not None:
1625
+ if type.equals(interval_type.storage_type):
1626
+ return storage_array
1627
+ elif isinstance(type, ArrowIntervalType):
1628
+ # ensure we have the same subtype and closed attributes
1629
+ if not type.equals(interval_type):
1630
+ raise TypeError(
1631
+ "Not supported to convert IntervalArray to type with "
1632
+ f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
1633
+ f"and 'closed' ({self.closed} vs {type.closed}) attributes"
1634
+ )
1635
+ else:
1636
+ raise TypeError(
1637
+ f"Not supported to convert IntervalArray to '{type}' type"
1638
+ )
1639
+
1640
+ return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)
1641
+
1642
+ _interval_shared_docs["to_tuples"] = textwrap.dedent(
1643
+ """
1644
+ Return an %(return_type)s of tuples of the form (left, right).
1645
+
1646
+ Parameters
1647
+ ----------
1648
+ na_tuple : bool, default True
1649
+ If ``True``, return ``NA`` as a tuple ``(nan, nan)``. If ``False``,
1650
+ just return ``NA`` as ``nan``.
1651
+
1652
+ Returns
1653
+ -------
1654
+ tuples: %(return_type)s
1655
+ %(examples)s\
1656
+ """
1657
+ )
1658
+
1659
+ @Appender(
1660
+ _interval_shared_docs["to_tuples"]
1661
+ % {
1662
+ "return_type": (
1663
+ "ndarray (if self is IntervalArray) or Index (if self is IntervalIndex)"
1664
+ ),
1665
+ "examples": textwrap.dedent(
1666
+ """\
1667
+
1668
+ Examples
1669
+ --------
1670
+ For :class:`pandas.IntervalArray`:
1671
+
1672
+ >>> idx = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
1673
+ >>> idx
1674
+ <IntervalArray>
1675
+ [(0, 1], (1, 2]]
1676
+ Length: 2, dtype: interval[int64, right]
1677
+ >>> idx.to_tuples()
1678
+ array([(0, 1), (1, 2)], dtype=object)
1679
+
1680
+ For :class:`pandas.IntervalIndex`:
1681
+
1682
+ >>> idx = pd.interval_range(start=0, end=2)
1683
+ >>> idx
1684
+ IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
1685
+ >>> idx.to_tuples()
1686
+ Index([(0, 1), (1, 2)], dtype='object')
1687
+ """
1688
+ ),
1689
+ }
1690
+ )
1691
+ def to_tuples(self, na_tuple: bool = True) -> np.ndarray:
1692
+ tuples = com.asarray_tuplesafe(zip(self._left, self._right))
1693
+ if not na_tuple:
1694
+ # GH 18756
1695
+ tuples = np.where(~self.isna(), tuples, np.nan)
1696
+ return tuples
1697
+
1698
+ # ---------------------------------------------------------------------
1699
+
1700
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
1701
+ value_left, value_right = self._validate_setitem_value(value)
1702
+
1703
+ if isinstance(self._left, np.ndarray):
1704
+ np.putmask(self._left, mask, value_left)
1705
+ assert isinstance(self._right, np.ndarray)
1706
+ np.putmask(self._right, mask, value_right)
1707
+ else:
1708
+ self._left._putmask(mask, value_left)
1709
+ assert not isinstance(self._right, np.ndarray)
1710
+ self._right._putmask(mask, value_right)
1711
+
1712
+ def insert(self, loc: int, item: Interval) -> Self:
1713
+ """
1714
+ Return a new IntervalArray inserting new item at location. Follows
1715
+ Python numpy.insert semantics for negative values. Only Interval
1716
+ objects and NA can be inserted into an IntervalArray.
1717
+
1718
+ Parameters
1719
+ ----------
1720
+ loc : int
1721
+ item : Interval
1722
+
1723
+ Returns
1724
+ -------
1725
+ IntervalArray
1726
+ """
1727
+ left_insert, right_insert = self._validate_scalar(item)
1728
+
1729
+ new_left = self.left.insert(loc, left_insert)
1730
+ new_right = self.right.insert(loc, right_insert)
1731
+
1732
+ return self._shallow_copy(new_left, new_right)
1733
+
1734
+ def delete(self, loc) -> Self:
1735
+ if isinstance(self._left, np.ndarray):
1736
+ new_left = np.delete(self._left, loc)
1737
+ assert isinstance(self._right, np.ndarray)
1738
+ new_right = np.delete(self._right, loc)
1739
+ else:
1740
+ new_left = self._left.delete(loc)
1741
+ assert not isinstance(self._right, np.ndarray)
1742
+ new_right = self._right.delete(loc)
1743
+ return self._shallow_copy(left=new_left, right=new_right)
1744
+
1745
+ @Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
1746
+ def repeat(
1747
+ self,
1748
+ repeats: int | Sequence[int],
1749
+ axis: AxisInt | None = None,
1750
+ ) -> Self:
1751
+ nv.validate_repeat((), {"axis": axis})
1752
+ left_repeat = self.left.repeat(repeats)
1753
+ right_repeat = self.right.repeat(repeats)
1754
+ return self._shallow_copy(left=left_repeat, right=right_repeat)
1755
+
1756
+ _interval_shared_docs["contains"] = textwrap.dedent(
1757
+ """
1758
+ Check elementwise if the Intervals contain the value.
1759
+
1760
+ Return a boolean mask whether the value is contained in the Intervals
1761
+ of the %(klass)s.
1762
+
1763
+ Parameters
1764
+ ----------
1765
+ other : scalar
1766
+ The value to check whether it is contained in the Intervals.
1767
+
1768
+ Returns
1769
+ -------
1770
+ boolean array
1771
+
1772
+ See Also
1773
+ --------
1774
+ Interval.contains : Check whether Interval object contains value.
1775
+ %(klass)s.overlaps : Check if an Interval overlaps the values in the
1776
+ %(klass)s.
1777
+
1778
+ Examples
1779
+ --------
1780
+ %(examples)s
1781
+ >>> intervals.contains(0.5)
1782
+ array([ True, False, False])
1783
+ """
1784
+ )
1785
+
1786
+ @Appender(
1787
+ _interval_shared_docs["contains"]
1788
+ % {
1789
+ "klass": "IntervalArray",
1790
+ "examples": textwrap.dedent(
1791
+ """\
1792
+ >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
1793
+ >>> intervals
1794
+ <IntervalArray>
1795
+ [(0, 1], (1, 3], (2, 4]]
1796
+ Length: 3, dtype: interval[int64, right]
1797
+ """
1798
+ ),
1799
+ }
1800
+ )
1801
+ def contains(self, other):
1802
+ if isinstance(other, Interval):
1803
+ raise NotImplementedError("contains not implemented for two intervals")
1804
+
1805
+ return (self._left < other if self.open_left else self._left <= other) & (
1806
+ other < self._right if self.open_right else other <= self._right
1807
+ )
1808
+
1809
+ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
1810
+ if isinstance(values, IntervalArray):
1811
+ if self.closed != values.closed:
1812
+ # not comparable -> no overlap
1813
+ return np.zeros(self.shape, dtype=bool)
1814
+
1815
+ if self.dtype == values.dtype:
1816
+ # GH#38353 instead of casting to object, operating on a
1817
+ # complex128 ndarray is much more performant.
1818
+ left = self._combined.view("complex128")
1819
+ right = values._combined.view("complex128")
1820
+ # error: Argument 1 to "isin" has incompatible type
1821
+ # "Union[ExtensionArray, ndarray[Any, Any],
1822
+ # ndarray[Any, dtype[Any]]]"; expected
1823
+ # "Union[_SupportsArray[dtype[Any]],
1824
+ # _NestedSequence[_SupportsArray[dtype[Any]]], bool,
1825
+ # int, float, complex, str, bytes, _NestedSequence[
1826
+ # Union[bool, int, float, complex, str, bytes]]]"
1827
+ return np.isin(left, right).ravel() # type: ignore[arg-type]
1828
+
1829
+ elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion(
1830
+ values.left.dtype
1831
+ ):
1832
+ # not comparable -> no overlap
1833
+ return np.zeros(self.shape, dtype=bool)
1834
+
1835
+ return isin(self.astype(object), values.astype(object))
1836
+
1837
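A brief sketch of isin above: when the dtypes and closed sides match, membership is checked on the packed complex128 view rather than on object dtype.

import pandas as pd

arr = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
values = pd.arrays.IntervalArray.from_tuples([(1, 2), (3, 4)])
arr.isin(values)  # array([False,  True])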
+ @property
1838
+ def _combined(self) -> IntervalSide:
1839
+ # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
1840
+ # has no attribute "reshape" [union-attr]
1841
+ left = self.left._values.reshape(-1, 1) # type: ignore[union-attr]
1842
+ right = self.right._values.reshape(-1, 1) # type: ignore[union-attr]
1843
+ if needs_i8_conversion(left.dtype):
1844
+ # error: Item "ndarray[Any, Any]" of "Any | ndarray[Any, Any]" has
1845
+ # no attribute "_concat_same_type"
1846
+ comb = left._concat_same_type( # type: ignore[union-attr]
1847
+ [left, right], axis=1
1848
+ )
1849
+ else:
1850
+ comb = np.concatenate([left, right], axis=1)
1851
+ return comb
1852
+
1853
+ def _from_combined(self, combined: np.ndarray) -> IntervalArray:
1854
+ """
1855
+ Create a new IntervalArray with our dtype from a 1D complex128 ndarray.
1856
+ """
1857
+ nc = combined.view("i8").reshape(-1, 2)
1858
+
1859
+ dtype = self._left.dtype
1860
+ if needs_i8_conversion(dtype):
1861
+ assert isinstance(self._left, (DatetimeArray, TimedeltaArray))
1862
+ new_left = type(self._left)._from_sequence(nc[:, 0], dtype=dtype)
1863
+ assert isinstance(self._right, (DatetimeArray, TimedeltaArray))
1864
+ new_right = type(self._right)._from_sequence(nc[:, 1], dtype=dtype)
1865
+ else:
1866
+ assert isinstance(dtype, np.dtype)
1867
+ new_left = nc[:, 0].view(dtype)
1868
+ new_right = nc[:, 1].view(dtype)
1869
+ return self._shallow_copy(left=new_left, right=new_right)
1870
+
1871
+ def unique(self) -> IntervalArray:
1872
+ # No overload variant of "__getitem__" of "ExtensionArray" matches argument
1873
+ # type "Tuple[slice, int]"
1874
+ nc = unique(
1875
+ self._combined.view("complex128")[:, 0] # type: ignore[call-overload]
1876
+ )
1877
+ nc = nc[:, None]
1878
+ return self._from_combined(nc)
1879
+
1880
+
1881
+ def _maybe_convert_platform_interval(values) -> ArrayLike:
1882
+ """
1883
+ Try to do platform conversion, with special casing for IntervalArray.
1884
+ Wrapper around maybe_convert_platform that alters the default return
1885
+ dtype in certain cases to be compatible with IntervalArray. For example,
1886
+ empty lists return with integer dtype instead of object dtype, which is
1887
+ prohibited for IntervalArray.
1888
+
1889
+ Parameters
1890
+ ----------
1891
+ values : array-like
1892
+
1893
+ Returns
1894
+ -------
1895
+ array
1896
+ """
1897
+ if isinstance(values, (list, tuple)) and len(values) == 0:
1898
+ # GH 19016
1899
+ # empty lists/tuples get object dtype by default, but this is
1900
+ # prohibited for IntervalArray, so coerce to integer instead
1901
+ return np.array([], dtype=np.int64)
1902
+ elif not is_list_like(values) or isinstance(values, ABCDataFrame):
1903
+ # This will raise later, but we avoid passing to maybe_convert_platform
1904
+ return values
1905
+ elif isinstance(getattr(values, "dtype", None), CategoricalDtype):
1906
+ values = np.asarray(values)
1907
+ elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)):
1908
+ # TODO: should we just cast these to list?
1909
+ return values
1910
+ else:
1911
+ values = extract_array(values, extract_numpy=True)
1912
+
1913
+ if not hasattr(values, "dtype"):
1914
+ values = np.asarray(values)
1915
+ if values.dtype.kind in "iu" and values.dtype != np.int64:
1916
+ values = values.astype(np.int64)
1917
+ return values
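A short illustration of the empty-input special case this helper documents; the dtype shown assumes the default closed='right':

import pandas as pd

# An empty breaks list is coerced to int64 rather than object dtype (GH 19016)
pd.arrays.IntervalArray.from_breaks([]).dtype  # interval[int64, right]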
venv/lib/python3.10/site-packages/pandas/core/arrays/numeric.py ADDED
@@ -0,0 +1,286 @@
1
+ from __future__ import annotations
2
+
3
+ import numbers
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Any,
7
+ Callable,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas._libs import (
13
+ lib,
14
+ missing as libmissing,
15
+ )
16
+ from pandas.errors import AbstractMethodError
17
+ from pandas.util._decorators import cache_readonly
18
+
19
+ from pandas.core.dtypes.common import (
20
+ is_integer_dtype,
21
+ is_string_dtype,
22
+ pandas_dtype,
23
+ )
24
+
25
+ from pandas.core.arrays.masked import (
26
+ BaseMaskedArray,
27
+ BaseMaskedDtype,
28
+ )
29
+
30
+ if TYPE_CHECKING:
31
+ from collections.abc import Mapping
32
+
33
+ import pyarrow
34
+
35
+ from pandas._typing import (
36
+ Dtype,
37
+ DtypeObj,
38
+ Self,
39
+ npt,
40
+ )
41
+
42
+
43
+ class NumericDtype(BaseMaskedDtype):
44
+ _default_np_dtype: np.dtype
45
+ _checker: Callable[[Any], bool] # is_foo_dtype
46
+
47
+ def __repr__(self) -> str:
48
+ return f"{self.name}Dtype()"
49
+
50
+ @cache_readonly
51
+ def is_signed_integer(self) -> bool:
52
+ return self.kind == "i"
53
+
54
+ @cache_readonly
55
+ def is_unsigned_integer(self) -> bool:
56
+ return self.kind == "u"
57
+
58
+ @property
59
+ def _is_numeric(self) -> bool:
60
+ return True
61
+
62
+ def __from_arrow__(
63
+ self, array: pyarrow.Array | pyarrow.ChunkedArray
64
+ ) -> BaseMaskedArray:
65
+ """
66
+ Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
67
+ """
68
+ import pyarrow
69
+
70
+ from pandas.core.arrays.arrow._arrow_utils import (
71
+ pyarrow_array_to_numpy_and_mask,
72
+ )
73
+
74
+ array_class = self.construct_array_type()
75
+
76
+ pyarrow_type = pyarrow.from_numpy_dtype(self.type)
77
+ if not array.type.equals(pyarrow_type) and not pyarrow.types.is_null(
78
+ array.type
79
+ ):
80
+ # test_from_arrow_type_error raise for string, but allow
81
+ # through itemsize conversion GH#31896
82
+ rt_dtype = pandas_dtype(array.type.to_pandas_dtype())
83
+ if rt_dtype.kind not in "iuf":
84
+ # Could allow "c" or potentially disallow float<->int conversion,
85
+ # but at the moment we specifically test that uint<->int works
86
+ raise TypeError(
87
+ f"Expected array of {self} type, got {array.type} instead"
88
+ )
89
+
90
+ array = array.cast(pyarrow_type)
91
+
92
+ if isinstance(array, pyarrow.ChunkedArray):
93
+ # TODO this "if" can be removed when requiring pyarrow >= 10.0, which fixed
94
+ # combine_chunks for empty arrays https://github.com/apache/arrow/pull/13757
95
+ if array.num_chunks == 0:
96
+ array = pyarrow.array([], type=array.type)
97
+ else:
98
+ array = array.combine_chunks()
99
+
100
+ data, mask = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype)
101
+ return array_class(data.copy(), ~mask, copy=False)
102
+
103
+ @classmethod
104
+ def _get_dtype_mapping(cls) -> Mapping[np.dtype, NumericDtype]:
105
+ raise AbstractMethodError(cls)
106
+
107
+ @classmethod
108
+ def _standardize_dtype(cls, dtype: NumericDtype | str | np.dtype) -> NumericDtype:
109
+ """
110
+ Convert a string representation or a numpy dtype to NumericDtype.
111
+ """
112
+ if isinstance(dtype, str) and (dtype.startswith(("Int", "UInt", "Float"))):
113
+ # Avoid DeprecationWarning from NumPy about np.dtype("Int64")
114
+ # https://github.com/numpy/numpy/pull/7476
115
+ dtype = dtype.lower()
116
+
117
+ if not isinstance(dtype, NumericDtype):
118
+ mapping = cls._get_dtype_mapping()
119
+ try:
120
+ dtype = mapping[np.dtype(dtype)]
121
+ except KeyError as err:
122
+ raise ValueError(f"invalid dtype specified {dtype}") from err
123
+ return dtype
124
+
125
+ @classmethod
126
+ def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
127
+ """
128
+ Safely cast the values to the given dtype.
129
+
130
+ "safe" in this context means the casting is lossless.
131
+ """
132
+ raise AbstractMethodError(cls)
133
+
134
+
135
+ def _coerce_to_data_and_mask(
136
+ values, dtype, copy: bool, dtype_cls: type[NumericDtype], default_dtype: np.dtype
137
+ ):
138
+ checker = dtype_cls._checker
139
+
140
+ mask = None
141
+ inferred_type = None
142
+
143
+ if dtype is None and hasattr(values, "dtype"):
144
+ if checker(values.dtype):
145
+ dtype = values.dtype
146
+
147
+ if dtype is not None:
148
+ dtype = dtype_cls._standardize_dtype(dtype)
149
+
150
+ cls = dtype_cls.construct_array_type()
151
+ if isinstance(values, cls):
152
+ values, mask = values._data, values._mask
153
+ if dtype is not None:
154
+ values = values.astype(dtype.numpy_dtype, copy=False)
155
+
156
+ if copy:
157
+ values = values.copy()
158
+ mask = mask.copy()
159
+ return values, mask, dtype, inferred_type
160
+
161
+ original = values
162
+ if not copy:
163
+ values = np.asarray(values)
164
+ else:
165
+ values = np.array(values, copy=copy)
166
+ inferred_type = None
167
+ if values.dtype == object or is_string_dtype(values.dtype):
168
+ inferred_type = lib.infer_dtype(values, skipna=True)
169
+ if inferred_type == "boolean" and dtype is None:
170
+ name = dtype_cls.__name__.strip("_")
171
+ raise TypeError(f"{values.dtype} cannot be converted to {name}")
172
+
173
+ elif values.dtype.kind == "b" and checker(dtype):
174
+ if not copy:
175
+ values = np.asarray(values, dtype=default_dtype)
176
+ else:
177
+ values = np.array(values, dtype=default_dtype, copy=copy)
178
+
179
+ elif values.dtype.kind not in "iuf":
180
+ name = dtype_cls.__name__.strip("_")
181
+ raise TypeError(f"{values.dtype} cannot be converted to {name}")
182
+
183
+ if values.ndim != 1:
184
+ raise TypeError("values must be a 1D list-like")
185
+
186
+ if mask is None:
187
+ if values.dtype.kind in "iu":
188
+ # fastpath
189
+ mask = np.zeros(len(values), dtype=np.bool_)
190
+ else:
191
+ mask = libmissing.is_numeric_na(values)
192
+ else:
193
+ assert len(mask) == len(values)
194
+
195
+ if mask.ndim != 1:
196
+ raise TypeError("mask must be a 1D list-like")
197
+
198
+ # infer dtype if needed
199
+ if dtype is None:
200
+ dtype = default_dtype
201
+ else:
202
+ dtype = dtype.numpy_dtype
203
+
204
+ if is_integer_dtype(dtype) and values.dtype.kind == "f" and len(values) > 0:
205
+ if mask.all():
206
+ values = np.ones(values.shape, dtype=dtype)
207
+ else:
208
+ idx = np.nanargmax(values)
209
+ if int(values[idx]) != original[idx]:
210
+ # We have ints that lost precision during the cast.
211
+ inferred_type = lib.infer_dtype(original, skipna=True)
212
+ if (
213
+ inferred_type not in ["floating", "mixed-integer-float"]
214
+ and not mask.any()
215
+ ):
216
+ values = np.asarray(original, dtype=dtype)
217
+ else:
218
+ values = np.asarray(original, dtype="object")
219
+
220
+ # we copy as need to coerce here
221
+ if mask.any():
222
+ values = values.copy()
223
+ values[mask] = cls._internal_fill_value
224
+ if inferred_type in ("string", "unicode"):
225
+ # casts from str are always safe since they raise
226
+ # a ValueError if the str cannot be parsed into a float
227
+ values = values.astype(dtype, copy=copy)
228
+ else:
229
+ values = dtype_cls._safe_cast(values, dtype, copy=False)
230
+
231
+ return values, mask, dtype, inferred_type
232
+
233
+
234
+ class NumericArray(BaseMaskedArray):
235
+ """
236
+ Base class for IntegerArray and FloatingArray.
237
+ """
238
+
239
+ _dtype_cls: type[NumericDtype]
240
+
241
+ def __init__(
242
+ self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
243
+ ) -> None:
244
+ checker = self._dtype_cls._checker
245
+ if not (isinstance(values, np.ndarray) and checker(values.dtype)):
246
+ descr = (
247
+ "floating"
248
+ if self._dtype_cls.kind == "f" # type: ignore[comparison-overlap]
249
+ else "integer"
250
+ )
251
+ raise TypeError(
252
+ f"values should be {descr} numpy array. Use "
253
+ "the 'pd.array' function instead"
254
+ )
255
+ if values.dtype == np.float16:
256
+ # If we don't raise here, then accessing self.dtype would raise
257
+ raise TypeError("FloatingArray does not support np.float16 dtype.")
258
+
259
+ super().__init__(values, mask, copy=copy)
260
+
261
+ @cache_readonly
262
+ def dtype(self) -> NumericDtype:
263
+ mapping = self._dtype_cls._get_dtype_mapping()
264
+ return mapping[self._data.dtype]
265
+
266
+ @classmethod
267
+ def _coerce_to_array(
268
+ cls, value, *, dtype: DtypeObj, copy: bool = False
269
+ ) -> tuple[np.ndarray, np.ndarray]:
270
+ dtype_cls = cls._dtype_cls
271
+ default_dtype = dtype_cls._default_np_dtype
272
+ values, mask, _, _ = _coerce_to_data_and_mask(
273
+ value, dtype, copy, dtype_cls, default_dtype
274
+ )
275
+ return values, mask
276
+
277
+ @classmethod
278
+ def _from_sequence_of_strings(
279
+ cls, strings, *, dtype: Dtype | None = None, copy: bool = False
280
+ ) -> Self:
281
+ from pandas.core.tools.numeric import to_numeric
282
+
283
+ scalars = to_numeric(strings, errors="raise", dtype_backend="numpy_nullable")
284
+ return cls._from_sequence(scalars, dtype=dtype, copy=copy)
285
+
286
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
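A minimal sketch of the data/mask layout produced by the coercion helpers above, using the public nullable Int64 dtype; the underscore-prefixed attribute is an internal detail shown only for illustration.

import pandas as pd

arr = pd.array([1, 2, None], dtype="Int64")
arr        # <IntegerArray> [1, 2, <NA>], dtype: Int64
arr._mask  # array([False, False,  True]) -- True marks the missing slot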
venv/lib/python3.10/site-packages/pandas/core/arrays/period.py ADDED
@@ -0,0 +1,1313 @@
1
+ from __future__ import annotations
2
+
3
+ from datetime import timedelta
4
+ import operator
5
+ from typing import (
6
+ TYPE_CHECKING,
7
+ Any,
8
+ Callable,
9
+ Literal,
10
+ TypeVar,
11
+ cast,
12
+ overload,
13
+ )
14
+ import warnings
15
+
16
+ import numpy as np
17
+
18
+ from pandas._libs import (
19
+ algos as libalgos,
20
+ lib,
21
+ )
22
+ from pandas._libs.arrays import NDArrayBacked
23
+ from pandas._libs.tslibs import (
24
+ BaseOffset,
25
+ NaT,
26
+ NaTType,
27
+ Timedelta,
28
+ add_overflowsafe,
29
+ astype_overflowsafe,
30
+ dt64arr_to_periodarr as c_dt64arr_to_periodarr,
31
+ get_unit_from_dtype,
32
+ iNaT,
33
+ parsing,
34
+ period as libperiod,
35
+ to_offset,
36
+ )
37
+ from pandas._libs.tslibs.dtypes import (
38
+ FreqGroup,
39
+ PeriodDtypeBase,
40
+ freq_to_period_freqstr,
41
+ )
42
+ from pandas._libs.tslibs.fields import isleapyear_arr
43
+ from pandas._libs.tslibs.offsets import (
44
+ Tick,
45
+ delta_to_tick,
46
+ )
47
+ from pandas._libs.tslibs.period import (
48
+ DIFFERENT_FREQ,
49
+ IncompatibleFrequency,
50
+ Period,
51
+ get_period_field_arr,
52
+ period_asfreq_arr,
53
+ )
54
+ from pandas.util._decorators import (
55
+ cache_readonly,
56
+ doc,
57
+ )
58
+ from pandas.util._exceptions import find_stack_level
59
+
60
+ from pandas.core.dtypes.common import (
61
+ ensure_object,
62
+ pandas_dtype,
63
+ )
64
+ from pandas.core.dtypes.dtypes import (
65
+ DatetimeTZDtype,
66
+ PeriodDtype,
67
+ )
68
+ from pandas.core.dtypes.generic import (
69
+ ABCIndex,
70
+ ABCPeriodIndex,
71
+ ABCSeries,
72
+ ABCTimedeltaArray,
73
+ )
74
+ from pandas.core.dtypes.missing import isna
75
+
76
+ from pandas.core.arrays import datetimelike as dtl
77
+ import pandas.core.common as com
78
+
79
+ if TYPE_CHECKING:
80
+ from collections.abc import Sequence
81
+
82
+ from pandas._typing import (
83
+ AnyArrayLike,
84
+ Dtype,
85
+ FillnaOptions,
86
+ NpDtype,
87
+ NumpySorter,
88
+ NumpyValueArrayLike,
89
+ Self,
90
+ npt,
91
+ )
92
+
93
+ from pandas.core.arrays import (
94
+ DatetimeArray,
95
+ TimedeltaArray,
96
+ )
97
+ from pandas.core.arrays.base import ExtensionArray
98
+
99
+
100
+ BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset)
101
+
102
+
103
+ _shared_doc_kwargs = {
104
+ "klass": "PeriodArray",
105
+ }
106
+
107
+
108
+ def _field_accessor(name: str, docstring: str | None = None):
109
+ def f(self):
110
+ base = self.dtype._dtype_code
111
+ result = get_period_field_arr(name, self.asi8, base)
112
+ return result
113
+
114
+ f.__name__ = name
115
+ f.__doc__ = docstring
116
+ return property(f)
117
+
118
+
119
+ # error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
120
+ # incompatible with definition in base class "ExtensionArray"
121
+ class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): # type: ignore[misc]
122
+ """
123
+ Pandas ExtensionArray for storing Period data.
124
+
125
+ Users should use :func:`~pandas.array` to create new instances.
126
+
127
+ Parameters
128
+ ----------
129
+ values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
130
+ The data to store. These should be arrays that can be directly
131
+ converted to ordinals without inference or copy (PeriodArray,
132
+ ndarray[int64]), or a box around such an array (Series[period],
133
+ PeriodIndex).
134
+ dtype : PeriodDtype, optional
135
+ A PeriodDtype instance from which to extract a `freq`. If both
136
+ `freq` and `dtype` are specified, then the frequencies must match.
137
+ freq : str or DateOffset
138
+ The `freq` to use for the array. Mostly applicable when `values`
139
+ is an ndarray of integers, when `freq` is required. When `values`
140
+ is a PeriodArray (or box around), it's checked that ``values.freq``
141
+ matches `freq`.
142
+ copy : bool, default False
143
+ Whether to copy the ordinals before storing.
144
+
145
+ Attributes
146
+ ----------
147
+ None
148
+
149
+ Methods
150
+ -------
151
+ None
152
+
153
+ See Also
154
+ --------
155
+ Period: Represents a period of time.
156
+ PeriodIndex : Immutable Index for period data.
157
+ period_range: Create a fixed-frequency PeriodArray.
158
+ array: Construct a pandas array.
159
+
160
+ Notes
161
+ -----
162
+ There are two components to a PeriodArray
163
+
164
+ - ordinals : integer ndarray
165
+ - freq : pd.tseries.offsets.Offset
166
+
167
+ The values are physically stored as a 1-D ndarray of integers. These are
168
+ called "ordinals" and represent some kind of offset from a base.
169
+
170
+ The `freq` indicates the span covered by each element of the array.
171
+ All elements in the PeriodArray have the same `freq`.
172
+
173
+ Examples
174
+ --------
175
+ >>> pd.arrays.PeriodArray(pd.PeriodIndex(['2023-01-01',
176
+ ... '2023-01-02'], freq='D'))
177
+ <PeriodArray>
178
+ ['2023-01-01', '2023-01-02']
179
+ Length: 2, dtype: period[D]
180
+ """
181
+
182
+ # array priority higher than numpy scalars
183
+ __array_priority__ = 1000
184
+ _typ = "periodarray" # ABCPeriodArray
185
+ _internal_fill_value = np.int64(iNaT)
186
+ _recognized_scalars = (Period,)
187
+ _is_recognized_dtype = lambda x: isinstance(
188
+ x, PeriodDtype
189
+ ) # check_compatible_with checks freq match
190
+ _infer_matches = ("period",)
191
+
192
+ @property
193
+ def _scalar_type(self) -> type[Period]:
194
+ return Period
195
+
196
+ # Names others delegate to us
197
+ _other_ops: list[str] = []
198
+ _bool_ops: list[str] = ["is_leap_year"]
199
+ _object_ops: list[str] = ["start_time", "end_time", "freq"]
200
+ _field_ops: list[str] = [
201
+ "year",
202
+ "month",
203
+ "day",
204
+ "hour",
205
+ "minute",
206
+ "second",
207
+ "weekofyear",
208
+ "weekday",
209
+ "week",
210
+ "dayofweek",
211
+ "day_of_week",
212
+ "dayofyear",
213
+ "day_of_year",
214
+ "quarter",
215
+ "qyear",
216
+ "days_in_month",
217
+ "daysinmonth",
218
+ ]
219
+ _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
220
+ _datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"]
221
+
222
+ _dtype: PeriodDtype
223
+
224
+ # --------------------------------------------------------------------
225
+ # Constructors
226
+
227
+ def __init__(
228
+ self, values, dtype: Dtype | None = None, freq=None, copy: bool = False
229
+ ) -> None:
230
+ if freq is not None:
231
+ # GH#52462
232
+ warnings.warn(
233
+ "The 'freq' keyword in the PeriodArray constructor is deprecated "
234
+ "and will be removed in a future version. Pass 'dtype' instead",
235
+ FutureWarning,
236
+ stacklevel=find_stack_level(),
237
+ )
238
+ freq = validate_dtype_freq(dtype, freq)
239
+ dtype = PeriodDtype(freq)
240
+
241
+ if dtype is not None:
242
+ dtype = pandas_dtype(dtype)
243
+ if not isinstance(dtype, PeriodDtype):
244
+ raise ValueError(f"Invalid dtype {dtype} for PeriodArray")
245
+
246
+ if isinstance(values, ABCSeries):
247
+ values = values._values
248
+ if not isinstance(values, type(self)):
249
+ raise TypeError("Incorrect dtype")
250
+
251
+ elif isinstance(values, ABCPeriodIndex):
252
+ values = values._values
253
+
254
+ if isinstance(values, type(self)):
255
+ if dtype is not None and dtype != values.dtype:
256
+ raise raise_on_incompatible(values, dtype.freq)
257
+ values, dtype = values._ndarray, values.dtype
258
+
259
+ if not copy:
260
+ values = np.asarray(values, dtype="int64")
261
+ else:
262
+ values = np.array(values, dtype="int64", copy=copy)
263
+ if dtype is None:
264
+ raise ValueError("dtype is not specified and cannot be inferred")
265
+ dtype = cast(PeriodDtype, dtype)
266
+ NDArrayBacked.__init__(self, values, dtype)
267
+
268
+ # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
269
+ @classmethod
270
+ def _simple_new( # type: ignore[override]
271
+ cls,
272
+ values: npt.NDArray[np.int64],
273
+ dtype: PeriodDtype,
274
+ ) -> Self:
275
+ # alias for PeriodArray.__init__
276
+ assertion_msg = "Should be numpy array of type i8"
277
+ assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
278
+ return cls(values, dtype=dtype)
279
+
280
+ @classmethod
281
+ def _from_sequence(
282
+ cls,
283
+ scalars,
284
+ *,
285
+ dtype: Dtype | None = None,
286
+ copy: bool = False,
287
+ ) -> Self:
288
+ if dtype is not None:
289
+ dtype = pandas_dtype(dtype)
290
+ if dtype and isinstance(dtype, PeriodDtype):
291
+ freq = dtype.freq
292
+ else:
293
+ freq = None
294
+
295
+ if isinstance(scalars, cls):
296
+ validate_dtype_freq(scalars.dtype, freq)
297
+ if copy:
298
+ scalars = scalars.copy()
299
+ return scalars
300
+
301
+ periods = np.asarray(scalars, dtype=object)
302
+
303
+ freq = freq or libperiod.extract_freq(periods)
304
+ ordinals = libperiod.extract_ordinals(periods, freq)
305
+ dtype = PeriodDtype(freq)
306
+ return cls(ordinals, dtype=dtype)
307
+
308
+ @classmethod
309
+ def _from_sequence_of_strings(
310
+ cls, strings, *, dtype: Dtype | None = None, copy: bool = False
311
+ ) -> Self:
312
+ return cls._from_sequence(strings, dtype=dtype, copy=copy)
313
+
314
+ @classmethod
315
+ def _from_datetime64(cls, data, freq, tz=None) -> Self:
316
+ """
317
+ Construct a PeriodArray from a datetime64 array
318
+
319
+ Parameters
320
+ ----------
321
+ data : ndarray[datetime64[ns], datetime64[ns, tz]]
322
+ freq : str or Tick
323
+ tz : tzinfo, optional
324
+
325
+ Returns
326
+ -------
327
+ PeriodArray[freq]
328
+ """
329
+ if isinstance(freq, BaseOffset):
330
+ freq = freq_to_period_freqstr(freq.n, freq.name)
331
+ data, freq = dt64arr_to_periodarr(data, freq, tz)
332
+ dtype = PeriodDtype(freq)
333
+ return cls(data, dtype=dtype)
334
+
335
+ @classmethod
336
+ def _generate_range(cls, start, end, periods, freq):
337
+ periods = dtl.validate_periods(periods)
338
+
339
+ if freq is not None:
340
+ freq = Period._maybe_convert_freq(freq)
341
+
342
+ if start is not None or end is not None:
343
+ subarr, freq = _get_ordinal_range(start, end, periods, freq)
344
+ else:
345
+ raise ValueError("Not enough parameters to construct Period range")
346
+
347
+ return subarr, freq
348
+
349
+ @classmethod
350
+ def _from_fields(cls, *, fields: dict, freq) -> Self:
351
+ subarr, freq = _range_from_fields(freq=freq, **fields)
352
+ dtype = PeriodDtype(freq)
353
+ return cls._simple_new(subarr, dtype=dtype)
354
+
355
+ # -----------------------------------------------------------------
356
+ # DatetimeLike Interface
357
+
358
+ # error: Argument 1 of "_unbox_scalar" is incompatible with supertype
359
+ # "DatetimeLikeArrayMixin"; supertype defines the argument type as
360
+ # "Union[Union[Period, Any, Timedelta], NaTType]"
361
+ def _unbox_scalar( # type: ignore[override]
362
+ self,
363
+ value: Period | NaTType,
364
+ ) -> np.int64:
365
+ if value is NaT:
366
+ # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value"
367
+ return np.int64(value._value) # type: ignore[union-attr]
368
+ elif isinstance(value, self._scalar_type):
369
+ self._check_compatible_with(value)
370
+ return np.int64(value.ordinal)
371
+ else:
372
+ raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
373
+
374
+ def _scalar_from_string(self, value: str) -> Period:
375
+ return Period(value, freq=self.freq)
376
+
377
+ # error: Argument 1 of "_check_compatible_with" is incompatible with
378
+ # supertype "DatetimeLikeArrayMixin"; supertype defines the argument type
379
+ # as "Period | Timestamp | Timedelta | NaTType"
380
+ def _check_compatible_with(self, other: Period | NaTType | PeriodArray) -> None: # type: ignore[override]
381
+ if other is NaT:
382
+ return
383
+ # error: Item "NaTType" of "Period | NaTType | PeriodArray" has no
384
+ # attribute "freq"
385
+ self._require_matching_freq(other.freq) # type: ignore[union-attr]
386
+
387
+ # --------------------------------------------------------------------
388
+ # Data / Attributes
389
+
390
+ @cache_readonly
391
+ def dtype(self) -> PeriodDtype:
392
+ return self._dtype
393
+
394
+ # error: Cannot override writeable attribute with read-only property
395
+ @property # type: ignore[override]
396
+ def freq(self) -> BaseOffset:
397
+ """
398
+ Return the frequency object for this PeriodArray.
399
+ """
400
+ return self.dtype.freq
401
+
402
+ @property
403
+ def freqstr(self) -> str:
404
+ return freq_to_period_freqstr(self.freq.n, self.freq.name)
405
+
406
+ def __array__(
407
+ self, dtype: NpDtype | None = None, copy: bool | None = None
408
+ ) -> np.ndarray:
409
+ if dtype == "i8":
410
+ return self.asi8
411
+ elif dtype == bool:
412
+ return ~self._isnan
413
+
414
+ # This will raise TypeError for non-object dtypes
415
+ return np.array(list(self), dtype=object)
416
+
417
+ def __arrow_array__(self, type=None):
418
+ """
419
+ Convert myself into a pyarrow Array.
420
+ """
421
+ import pyarrow
422
+
423
+ from pandas.core.arrays.arrow.extension_types import ArrowPeriodType
424
+
425
+ if type is not None:
426
+ if pyarrow.types.is_integer(type):
427
+ return pyarrow.array(self._ndarray, mask=self.isna(), type=type)
428
+ elif isinstance(type, ArrowPeriodType):
429
+ # ensure we have the same freq
430
+ if self.freqstr != type.freq:
431
+ raise TypeError(
432
+ "Not supported to convert PeriodArray to array with different "
433
+ f"'freq' ({self.freqstr} vs {type.freq})"
434
+ )
435
+ else:
436
+ raise TypeError(
437
+ f"Not supported to convert PeriodArray to '{type}' type"
438
+ )
439
+
440
+ period_type = ArrowPeriodType(self.freqstr)
441
+ storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64")
442
+ return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
443
+
444
+ # --------------------------------------------------------------------
445
+ # Vectorized analogues of Period properties
446
+
447
+ year = _field_accessor(
448
+ "year",
449
+ """
450
+ The year of the period.
451
+
452
+ Examples
453
+ --------
454
+ >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
455
+ >>> idx.year
456
+ Index([2023, 2024, 2025], dtype='int64')
457
+ """,
458
+ )
459
+ month = _field_accessor(
460
+ "month",
461
+ """
462
+ The month as January=1, December=12.
463
+
464
+ Examples
465
+ --------
466
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
467
+ >>> idx.month
468
+ Index([1, 2, 3], dtype='int64')
469
+ """,
470
+ )
471
+ day = _field_accessor(
472
+ "day",
473
+ """
474
+ The days of the period.
475
+
476
+ Examples
477
+ --------
478
+ >>> idx = pd.PeriodIndex(['2020-01-31', '2020-02-28'], freq='D')
479
+ >>> idx.day
480
+ Index([31, 28], dtype='int64')
481
+ """,
482
+ )
483
+ hour = _field_accessor(
484
+ "hour",
485
+ """
486
+ The hour of the period.
487
+
488
+ Examples
489
+ --------
490
+ >>> idx = pd.PeriodIndex(["2023-01-01 10:00", "2023-01-01 11:00"], freq='h')
491
+ >>> idx.hour
492
+ Index([10, 11], dtype='int64')
493
+ """,
494
+ )
495
+ minute = _field_accessor(
496
+ "minute",
497
+ """
498
+ The minute of the period.
499
+
500
+ Examples
501
+ --------
502
+ >>> idx = pd.PeriodIndex(["2023-01-01 10:30:00",
503
+ ... "2023-01-01 11:50:00"], freq='min')
504
+ >>> idx.minute
505
+ Index([30, 50], dtype='int64')
506
+ """,
507
+ )
508
+ second = _field_accessor(
509
+ "second",
510
+ """
511
+ The second of the period.
512
+
513
+ Examples
514
+ --------
515
+ >>> idx = pd.PeriodIndex(["2023-01-01 10:00:30",
516
+ ... "2023-01-01 10:00:31"], freq='s')
517
+ >>> idx.second
518
+ Index([30, 31], dtype='int64')
519
+ """,
520
+ )
521
+ weekofyear = _field_accessor(
522
+ "week",
523
+ """
524
+ The week ordinal of the year.
525
+
526
+ Examples
527
+ --------
528
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
529
+ >>> idx.week # It can also be written as `weekofyear`
530
+ Index([5, 9, 13], dtype='int64')
531
+ """,
532
+ )
533
+ week = weekofyear
534
+ day_of_week = _field_accessor(
535
+ "day_of_week",
536
+ """
537
+ The day of the week with Monday=0, Sunday=6.
538
+
539
+ Examples
540
+ --------
541
+ >>> idx = pd.PeriodIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D")
542
+ >>> idx.weekday
543
+ Index([6, 0, 1], dtype='int64')
544
+ """,
545
+ )
546
+ dayofweek = day_of_week
547
+ weekday = dayofweek
548
+ dayofyear = day_of_year = _field_accessor(
549
+ "day_of_year",
550
+ """
551
+ The ordinal day of the year.
552
+
553
+ Examples
554
+ --------
555
+ >>> idx = pd.PeriodIndex(["2023-01-10", "2023-02-01", "2023-03-01"], freq="D")
556
+ >>> idx.dayofyear
557
+ Index([10, 32, 60], dtype='int64')
558
+
559
+ >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
560
+ >>> idx
561
+ PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]')
562
+ >>> idx.dayofyear
563
+ Index([365, 366, 365], dtype='int64')
564
+ """,
565
+ )
566
+ quarter = _field_accessor(
567
+ "quarter",
568
+ """
569
+ The quarter of the date.
570
+
571
+ Examples
572
+ --------
573
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
574
+ >>> idx.quarter
575
+ Index([1, 1, 1], dtype='int64')
576
+ """,
577
+ )
578
+ qyear = _field_accessor("qyear")
579
+ days_in_month = _field_accessor(
580
+ "days_in_month",
581
+ """
582
+ The number of days in the month.
583
+
584
+ Examples
585
+ --------
586
+ For Series:
587
+
588
+ >>> period = pd.period_range('2020-1-1 00:00', '2020-3-1 00:00', freq='M')
589
+ >>> s = pd.Series(period)
590
+ >>> s
591
+ 0 2020-01
592
+ 1 2020-02
593
+ 2 2020-03
594
+ dtype: period[M]
595
+ >>> s.dt.days_in_month
596
+ 0 31
597
+ 1 29
598
+ 2 31
599
+ dtype: int64
600
+
601
+ For PeriodIndex:
602
+
603
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
604
+ >>> idx.days_in_month # It can also be entered as `daysinmonth`
605
+ Index([31, 28, 31], dtype='int64')
606
+ """,
607
+ )
608
+ daysinmonth = days_in_month
609
+
610
+ @property
611
+ def is_leap_year(self) -> npt.NDArray[np.bool_]:
612
+ """
613
+ Logical indicating if the date belongs to a leap year.
614
+
615
+ Examples
616
+ --------
617
+ >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
618
+ >>> idx.is_leap_year
619
+ array([False, True, False])
620
+ """
621
+ return isleapyear_arr(np.asarray(self.year))
622
+
623
+ def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray:
624
+ """
625
+ Cast to DatetimeArray/Index.
626
+
627
+ Parameters
628
+ ----------
629
+ freq : str or DateOffset, optional
630
+ Target frequency. The default is 'D' for week or longer,
631
+ 's' otherwise.
632
+ how : {'s', 'e', 'start', 'end'}
633
+ Whether to use the start or end of the time period being converted.
634
+
635
+ Returns
636
+ -------
637
+ DatetimeArray/Index
638
+
639
+ Examples
640
+ --------
641
+ >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
642
+ >>> idx.to_timestamp()
643
+ DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'],
644
+ dtype='datetime64[ns]', freq='MS')
645
+ """
646
+ from pandas.core.arrays import DatetimeArray
647
+
648
+ how = libperiod.validate_end_alias(how)
649
+
650
+ end = how == "E"
651
+ if end:
652
+ if freq == "B" or self.freq == "B":
653
+ # roll forward to ensure we land on B date
654
+ adjust = Timedelta(1, "D") - Timedelta(1, "ns")
655
+ return self.to_timestamp(how="start") + adjust
656
+ else:
657
+ adjust = Timedelta(1, "ns")
658
+ return (self + self.freq).to_timestamp(how="start") - adjust
659
+
660
+ if freq is None:
661
+ freq_code = self._dtype._get_to_timestamp_base()
662
+ dtype = PeriodDtypeBase(freq_code, 1)
663
+ freq = dtype._freqstr
664
+ base = freq_code
665
+ else:
666
+ freq = Period._maybe_convert_freq(freq)
667
+ base = freq._period_dtype_code
668
+
669
+ new_parr = self.asfreq(freq, how=how)
670
+
671
+ new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base)
672
+ dta = DatetimeArray._from_sequence(new_data)
673
+
674
+ if self.freq.name == "B":
675
+ # See if we can retain BDay instead of Day in cases where
676
+ # len(self) is too small for infer_freq to distinguish between them
677
+ diffs = libalgos.unique_deltas(self.asi8)
678
+ if len(diffs) == 1:
679
+ diff = diffs[0]
680
+ if diff == self.dtype._n:
681
+ dta._freq = self.freq
682
+ elif diff == 1:
683
+ dta._freq = self.freq.base
684
+ # TODO: other cases?
685
+ return dta
686
+ else:
687
+ return dta._with_freq("infer")
688
+
689
+ # --------------------------------------------------------------------
690
+
691
+ def _box_func(self, x) -> Period | NaTType:
692
+ return Period._from_ordinal(ordinal=x, freq=self.freq)
693
+
694
+ @doc(**_shared_doc_kwargs, other="PeriodIndex", other_name="PeriodIndex")
695
+ def asfreq(self, freq=None, how: str = "E") -> Self:
696
+ """
697
+ Convert the {klass} to the specified frequency `freq`.
698
+
699
+ Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments
700
+ to each :class:`~pandas.Period` in this {klass}.
701
+
702
+ Parameters
703
+ ----------
704
+ freq : str
705
+ A frequency.
706
+ how : str {{'E', 'S'}}, default 'E'
707
+ Whether the elements should be aligned to the end
708
+ or start within a period.
709
+
710
+ * 'E', 'END', or 'FINISH' for end,
711
+ * 'S', 'START', or 'BEGIN' for start.
712
+
713
+ January 31st ('END') vs. January 1st ('START') for example.
714
+
715
+ Returns
716
+ -------
717
+ {klass}
718
+ The transformed {klass} with the new frequency.
719
+
720
+ See Also
721
+ --------
722
+ {other}.asfreq: Convert each Period in a {other_name} to the given frequency.
723
+ Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency.
724
+
725
+ Examples
726
+ --------
727
+ >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='Y')
728
+ >>> pidx
729
+ PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
730
+ dtype='period[Y-DEC]')
731
+
732
+ >>> pidx.asfreq('M')
733
+ PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
734
+ '2015-12'], dtype='period[M]')
735
+
736
+ >>> pidx.asfreq('M', how='S')
737
+ PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
738
+ '2015-01'], dtype='period[M]')
739
+ """
740
+ how = libperiod.validate_end_alias(how)
741
+ if isinstance(freq, BaseOffset) and hasattr(freq, "_period_dtype_code"):
742
+ freq = PeriodDtype(freq)._freqstr
743
+ freq = Period._maybe_convert_freq(freq)
744
+
745
+ base1 = self._dtype._dtype_code
746
+ base2 = freq._period_dtype_code
747
+
748
+ asi8 = self.asi8
749
+ # self.freq.n can't be negative or 0
750
+ end = how == "E"
751
+ if end:
752
+ ordinal = asi8 + self.dtype._n - 1
753
+ else:
754
+ ordinal = asi8
755
+
756
+ new_data = period_asfreq_arr(ordinal, base1, base2, end)
757
+
758
+ if self._hasna:
759
+ new_data[self._isnan] = iNaT
760
+
761
+ dtype = PeriodDtype(freq)
762
+ return type(self)(new_data, dtype=dtype)
763
+
764
+ # ------------------------------------------------------------------
765
+ # Rendering Methods
766
+
767
+ def _formatter(self, boxed: bool = False):
768
+ if boxed:
769
+ return str
770
+ return "'{}'".format
771
+
772
+ def _format_native_types(
773
+ self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
774
+ ) -> npt.NDArray[np.object_]:
775
+ """
776
+ actually format my specific types
777
+ """
778
+ return libperiod.period_array_strftime(
779
+ self.asi8, self.dtype._dtype_code, na_rep, date_format
780
+ )
781
+
782
+ # ------------------------------------------------------------------
783
+
784
+ def astype(self, dtype, copy: bool = True):
785
+ # We handle Period[T] -> Period[U]
786
+ # Our parent handles everything else.
787
+ dtype = pandas_dtype(dtype)
788
+ if dtype == self._dtype:
789
+ if not copy:
790
+ return self
791
+ else:
792
+ return self.copy()
793
+ if isinstance(dtype, PeriodDtype):
794
+ return self.asfreq(dtype.freq)
795
+
796
+ if lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype):
797
+ # GH#45038 match PeriodIndex behavior.
798
+ tz = getattr(dtype, "tz", None)
799
+ unit = dtl.dtype_to_unit(dtype)
800
+ return self.to_timestamp().tz_localize(tz).as_unit(unit)
801
+
802
+ return super().astype(dtype, copy=copy)
803
+
804
+ def searchsorted(
805
+ self,
806
+ value: NumpyValueArrayLike | ExtensionArray,
807
+ side: Literal["left", "right"] = "left",
808
+ sorter: NumpySorter | None = None,
809
+ ) -> npt.NDArray[np.intp] | np.intp:
810
+ npvalue = self._validate_setitem_value(value).view("M8[ns]")
811
+
812
+ # Cast to M8 to get datetime-like NaT placement,
813
+ # similar to dtl._period_dispatch
814
+ m8arr = self._ndarray.view("M8[ns]")
815
+ return m8arr.searchsorted(npvalue, side=side, sorter=sorter)
816
+
817
+ def _pad_or_backfill(
818
+ self,
819
+ *,
820
+ method: FillnaOptions,
821
+ limit: int | None = None,
822
+ limit_area: Literal["inside", "outside"] | None = None,
823
+ copy: bool = True,
824
+ ) -> Self:
825
+ # view as dt64 so we get treated as timelike in core.missing,
826
+ # similar to dtl._period_dispatch
827
+ dta = self.view("M8[ns]")
828
+ result = dta._pad_or_backfill(
829
+ method=method, limit=limit, limit_area=limit_area, copy=copy
830
+ )
831
+ if copy:
832
+ return cast("Self", result.view(self.dtype))
833
+ else:
834
+ return self
835
+
836
+ def fillna(
837
+ self, value=None, method=None, limit: int | None = None, copy: bool = True
838
+ ) -> Self:
839
+ if method is not None:
840
+ # view as dt64 so we get treated as timelike in core.missing,
841
+ # similar to dtl._period_dispatch
842
+ dta = self.view("M8[ns]")
843
+ result = dta.fillna(value=value, method=method, limit=limit, copy=copy)
844
+ # error: Incompatible return value type (got "Union[ExtensionArray,
845
+ # ndarray[Any, Any]]", expected "PeriodArray")
846
+ return result.view(self.dtype) # type: ignore[return-value]
847
+ return super().fillna(value=value, method=method, limit=limit, copy=copy)
848
+
849
+ # ------------------------------------------------------------------
850
+ # Arithmetic Methods
851
+
852
+ def _addsub_int_array_or_scalar(
853
+ self, other: np.ndarray | int, op: Callable[[Any, Any], Any]
854
+ ) -> Self:
855
+ """
856
+ Add or subtract array of integers.
857
+
858
+ Parameters
859
+ ----------
860
+ other : np.ndarray[int64] or int
861
+ op : {operator.add, operator.sub}
862
+
863
+ Returns
864
+ -------
865
+ result : PeriodArray
866
+ """
867
+ assert op in [operator.add, operator.sub]
868
+ if op is operator.sub:
869
+ other = -other
870
+ res_values = add_overflowsafe(self.asi8, np.asarray(other, dtype="i8"))
871
+ return type(self)(res_values, dtype=self.dtype)
872
+
873
+ def _add_offset(self, other: BaseOffset):
874
+ assert not isinstance(other, Tick)
875
+
876
+ self._require_matching_freq(other, base=True)
877
+ return self._addsub_int_array_or_scalar(other.n, operator.add)
878
+
879
+ # TODO: can we de-duplicate with Period._add_timedeltalike_scalar?
880
+ def _add_timedeltalike_scalar(self, other):
881
+ """
882
+ Parameters
883
+ ----------
884
+ other : timedelta, Tick, np.timedelta64
885
+
886
+ Returns
887
+ -------
888
+ PeriodArray
889
+ """
890
+ if not isinstance(self.freq, Tick):
891
+ # We cannot add timedelta-like to non-tick PeriodArray
892
+ raise raise_on_incompatible(self, other)
893
+
894
+ if isna(other):
895
+ # i.e. np.timedelta64("NaT")
896
+ return super()._add_timedeltalike_scalar(other)
897
+
898
+ td = np.asarray(Timedelta(other).asm8)
899
+ return self._add_timedelta_arraylike(td)
900
+
901
+ def _add_timedelta_arraylike(
902
+ self, other: TimedeltaArray | npt.NDArray[np.timedelta64]
903
+ ) -> Self:
904
+ """
905
+ Parameters
906
+ ----------
907
+ other : TimedeltaArray or ndarray[timedelta64]
908
+
909
+ Returns
910
+ -------
911
+ PeriodArray
912
+ """
913
+ if not self.dtype._is_tick_like():
914
+ # We cannot add timedelta-like to non-tick PeriodArray
915
+ raise TypeError(
916
+ f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
917
+ )
918
+
919
+ dtype = np.dtype(f"m8[{self.dtype._td64_unit}]")
920
+
921
+ # Similar to _check_timedeltalike_freq_compat, but we raise with a
922
+ # more specific exception message if necessary.
923
+ try:
924
+ delta = astype_overflowsafe(
925
+ np.asarray(other), dtype=dtype, copy=False, round_ok=False
926
+ )
927
+ except ValueError as err:
928
+ # e.g. if we have minutes freq and try to add 30s
929
+ # "Cannot losslessly convert units"
930
+ raise IncompatibleFrequency(
931
+ "Cannot add/subtract timedelta-like from PeriodArray that is "
932
+ "not an integer multiple of the PeriodArray's freq."
933
+ ) from err
934
+
935
+ res_values = add_overflowsafe(self.asi8, np.asarray(delta.view("i8")))
936
+ return type(self)(res_values, dtype=self.dtype)
937
+
938
+ def _check_timedeltalike_freq_compat(self, other):
939
+ """
940
+ Arithmetic operations with timedelta-like scalars or array `other`
941
+ are only valid if `other` is an integer multiple of `self.freq`.
942
+ If the operation is valid, find that integer multiple. Otherwise,
943
+ raise because the operation is invalid.
944
+
945
+ Parameters
946
+ ----------
947
+ other : timedelta, np.timedelta64, Tick,
948
+ ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
949
+
950
+ Returns
951
+ -------
952
+ multiple : int or ndarray[int64]
953
+
954
+ Raises
955
+ ------
956
+ IncompatibleFrequency
957
+ """
958
+ assert self.dtype._is_tick_like() # checked by calling function
959
+
960
+ dtype = np.dtype(f"m8[{self.dtype._td64_unit}]")
961
+
962
+ if isinstance(other, (timedelta, np.timedelta64, Tick)):
963
+ td = np.asarray(Timedelta(other).asm8)
964
+ else:
965
+ td = np.asarray(other)
966
+
967
+ try:
968
+ delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False)
969
+ except ValueError as err:
970
+ raise raise_on_incompatible(self, other) from err
971
+
972
+ delta = delta.view("i8")
973
+ return lib.item_from_zerodim(delta)
974
+
975
+
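A quick usage sketch of the freq-compatibility rule implemented above (illustrative only, not part of the pandas source shown here; assumes a recent pandas). A timedelta offset is accepted only when it is an integer multiple of the array's tick frequency:

>>> import pandas as pd
>>> parr = pd.period_range("2020-01-01", periods=3, freq="h").array
>>> parr + pd.Timedelta(hours=2)      # 2 hours == 2 * freq, accepted
<PeriodArray>
['2020-01-01 02:00', '2020-01-01 03:00', '2020-01-01 04:00']
Length: 3, dtype: period[h]
>>> parr + pd.Timedelta(minutes=30)   # not an integer multiple of 1 hour
Traceback (most recent call last):
    ...
IncompatibleFrequency: Cannot add/subtract timedelta-like from PeriodArray that is not an integer multiple of the PeriodArray's freq.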
976
+ def raise_on_incompatible(left, right) -> IncompatibleFrequency:
977
+ """
978
+ Helper function to render a consistent error message when raising
979
+ IncompatibleFrequency.
980
+
981
+ Parameters
982
+ ----------
983
+ left : PeriodArray
984
+ right : None, DateOffset, Period, ndarray, or timedelta-like
985
+
986
+ Returns
987
+ -------
988
+ IncompatibleFrequency
989
+ Exception to be raised by the caller.
990
+ """
991
+ # GH#24283 error message format depends on whether right is scalar
992
+ if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
993
+ other_freq = None
994
+ elif isinstance(right, BaseOffset):
995
+ other_freq = freq_to_period_freqstr(right.n, right.name)
996
+ elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period)):
997
+ other_freq = right.freqstr
998
+ else:
999
+ other_freq = delta_to_tick(Timedelta(right)).freqstr
1000
+
1001
+ own_freq = freq_to_period_freqstr(left.freq.n, left.freq.name)
1002
+ msg = DIFFERENT_FREQ.format(
1003
+ cls=type(left).__name__, own_freq=own_freq, other_freq=other_freq
1004
+ )
1005
+ return IncompatibleFrequency(msg)
1006
+
1007
+
1008
+ # -------------------------------------------------------------------
1009
+ # Constructor Helpers
1010
+
1011
+
1012
+ def period_array(
1013
+ data: Sequence[Period | str | None] | AnyArrayLike,
1014
+ freq: str | Tick | BaseOffset | None = None,
1015
+ copy: bool = False,
1016
+ ) -> PeriodArray:
1017
+ """
1018
+ Construct a new PeriodArray from a sequence of Period scalars.
1019
+
1020
+ Parameters
1021
+ ----------
1022
+ data : Sequence of Period objects
1023
+ A sequence of Period objects. These are required to all have
1024
+ the same ``freq``. Missing values can be indicated by ``None``
1025
+ or ``pandas.NaT``.
1026
+ freq : str, Tick, or Offset
1027
+ The frequency of every element of the array. This can be specified
1028
+ to avoid inferring the `freq` from `data`.
1029
+ copy : bool, default False
1030
+ Whether to ensure a copy of the data is made.
1031
+
1032
+ Returns
1033
+ -------
1034
+ PeriodArray
1035
+
1036
+ See Also
1037
+ --------
1038
+ PeriodArray
1039
+ pandas.PeriodIndex
1040
+
1041
+ Examples
1042
+ --------
1043
+ >>> period_array([pd.Period('2017', freq='Y'),
1044
+ ... pd.Period('2018', freq='Y')])
1045
+ <PeriodArray>
1046
+ ['2017', '2018']
1047
+ Length: 2, dtype: period[Y-DEC]
1048
+
1049
+ >>> period_array([pd.Period('2017', freq='Y'),
1050
+ ... pd.Period('2018', freq='Y'),
1051
+ ... pd.NaT])
1052
+ <PeriodArray>
1053
+ ['2017', '2018', 'NaT']
1054
+ Length: 3, dtype: period[Y-DEC]
1055
+
1056
+ Integers that look like years are handled
1057
+
1058
+ >>> period_array([2000, 2001, 2002], freq='D')
1059
+ <PeriodArray>
1060
+ ['2000-01-01', '2001-01-01', '2002-01-01']
1061
+ Length: 3, dtype: period[D]
1062
+
1063
+ Datetime-like strings may also be passed
1064
+
1065
+ >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
1066
+ <PeriodArray>
1067
+ ['2000Q1', '2000Q2', '2000Q3', '2000Q4']
1068
+ Length: 4, dtype: period[Q-DEC]
1069
+ """
1070
+ data_dtype = getattr(data, "dtype", None)
1071
+
1072
+ if lib.is_np_dtype(data_dtype, "M"):
1073
+ return PeriodArray._from_datetime64(data, freq)
1074
+ if isinstance(data_dtype, PeriodDtype):
1075
+ out = PeriodArray(data)
1076
+ if freq is not None:
1077
+ if freq == data_dtype.freq:
1078
+ return out
1079
+ return out.asfreq(freq)
1080
+ return out
1081
+
1082
+ # other iterable of some kind
1083
+ if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
1084
+ data = list(data)
1085
+
1086
+ arrdata = np.asarray(data)
1087
+
1088
+ dtype: PeriodDtype | None
1089
+ if freq:
1090
+ dtype = PeriodDtype(freq)
1091
+ else:
1092
+ dtype = None
1093
+
1094
+ if arrdata.dtype.kind == "f" and len(arrdata) > 0:
1095
+ raise TypeError("PeriodIndex does not allow floating point in construction")
1096
+
1097
+ if arrdata.dtype.kind in "iu":
1098
+ arr = arrdata.astype(np.int64, copy=False)
1099
+ # error: Argument 2 to "from_ordinals" has incompatible type "Union[str,
1100
+ # Tick, None]"; expected "Union[timedelta, BaseOffset, str]"
1101
+ ordinals = libperiod.from_ordinals(arr, freq) # type: ignore[arg-type]
1102
+ return PeriodArray(ordinals, dtype=dtype)
1103
+
1104
+ data = ensure_object(arrdata)
1105
+ if freq is None:
1106
+ freq = libperiod.extract_freq(data)
1107
+ dtype = PeriodDtype(freq)
1108
+ return PeriodArray._from_sequence(data, dtype=dtype)
1109
+
1110
+
1111
+ @overload
1112
+ def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT:
1113
+ ...
1114
+
1115
+
1116
+ @overload
1117
+ def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset:
1118
+ ...
1119
+
1120
+
1121
+ def validate_dtype_freq(
1122
+ dtype, freq: BaseOffsetT | BaseOffset | timedelta | str | None
1123
+ ) -> BaseOffsetT:
1124
+ """
1125
+ If both a dtype and a freq are available, ensure they match. If only
1126
+ dtype is available, extract the implied freq.
1127
+
1128
+ Parameters
1129
+ ----------
1130
+ dtype : dtype
1131
+ freq : DateOffset or None
1132
+
1133
+ Returns
1134
+ -------
1135
+ freq : DateOffset
1136
+
1137
+ Raises
1138
+ ------
1139
+ ValueError : non-period dtype
1140
+ IncompatibleFrequency : mismatch between dtype and freq
1141
+ """
1142
+ if freq is not None:
1143
+ freq = to_offset(freq, is_period=True)
1144
+
1145
+ if dtype is not None:
1146
+ dtype = pandas_dtype(dtype)
1147
+ if not isinstance(dtype, PeriodDtype):
1148
+ raise ValueError("dtype must be PeriodDtype")
1149
+ if freq is None:
1150
+ freq = dtype.freq
1151
+ elif freq != dtype.freq:
1152
+ raise IncompatibleFrequency("specified freq and dtype are different")
1153
+ # error: Incompatible return value type (got "Union[BaseOffset, Any, None]",
1154
+ # expected "BaseOffset")
1155
+ return freq # type: ignore[return-value]
1156
+
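A small sketch of how this helper resolves dtype and freq (illustrative only; it exercises the private function defined above and is not part of the pandas source shown here):

>>> from pandas import PeriodDtype
>>> from pandas.core.arrays.period import validate_dtype_freq
>>> validate_dtype_freq(PeriodDtype("M"), None)   # freq implied by the dtype
<MonthEnd>
>>> validate_dtype_freq(None, "M")                # freq alone is also enough
<MonthEnd>
>>> validate_dtype_freq(PeriodDtype("M"), "D")    # mismatch
Traceback (most recent call last):
    ...
IncompatibleFrequency: specified freq and dtype are different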
1157
+
1158
+ def dt64arr_to_periodarr(
1159
+ data, freq, tz=None
1160
+ ) -> tuple[npt.NDArray[np.int64], BaseOffset]:
1161
+ """
1162
+ Convert a datetime-like array to Period ordinals.
1163
+
1164
+ Parameters
1165
+ ----------
1166
+ data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
1167
+ freq : Optional[Union[str, Tick]]
1168
+ Must match the `freq` on the `data` if `data` is a DatetimeIndex
1169
+ or Series.
1170
+ tz : Optional[tzinfo]
1171
+
1172
+ Returns
1173
+ -------
1174
+ ordinals : ndarray[int64]
1175
+ freq : Tick
1176
+ The frequency extracted from the Series or DatetimeIndex if that's
1177
+ used.
1178
+
1179
+ """
1180
+ if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M":
1181
+ raise ValueError(f"Wrong dtype: {data.dtype}")
1182
+
1183
+ if freq is None:
1184
+ if isinstance(data, ABCIndex):
1185
+ data, freq = data._values, data.freq
1186
+ elif isinstance(data, ABCSeries):
1187
+ data, freq = data._values, data.dt.freq
1188
+
1189
+ elif isinstance(data, (ABCIndex, ABCSeries)):
1190
+ data = data._values
1191
+
1192
+ reso = get_unit_from_dtype(data.dtype)
1193
+ freq = Period._maybe_convert_freq(freq)
1194
+ base = freq._period_dtype_code
1195
+ return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq
1196
+
1197
+
1198
+ def _get_ordinal_range(start, end, periods, freq, mult: int = 1):
1199
+ if com.count_not_none(start, end, periods) != 2:
1200
+ raise ValueError(
1201
+ "Of the three parameters: start, end, and periods, "
1202
+ "exactly two must be specified"
1203
+ )
1204
+
1205
+ if freq is not None:
1206
+ freq = to_offset(freq, is_period=True)
1207
+ mult = freq.n
1208
+
1209
+ if start is not None:
1210
+ start = Period(start, freq)
1211
+ if end is not None:
1212
+ end = Period(end, freq)
1213
+
1214
+ is_start_per = isinstance(start, Period)
1215
+ is_end_per = isinstance(end, Period)
1216
+
1217
+ if is_start_per and is_end_per and start.freq != end.freq:
1218
+ raise ValueError("start and end must have same freq")
1219
+ if start is NaT or end is NaT:
1220
+ raise ValueError("start and end must not be NaT")
1221
+
1222
+ if freq is None:
1223
+ if is_start_per:
1224
+ freq = start.freq
1225
+ elif is_end_per:
1226
+ freq = end.freq
1227
+ else: # pragma: no cover
1228
+ raise ValueError("Could not infer freq from start/end")
1229
+ mult = freq.n
1230
+
1231
+ if periods is not None:
1232
+ periods = periods * mult
1233
+ if start is None:
1234
+ data = np.arange(
1235
+ end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
1236
+ )
1237
+ else:
1238
+ data = np.arange(
1239
+ start.ordinal, start.ordinal + periods, mult, dtype=np.int64
1240
+ )
1241
+ else:
1242
+ data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
1243
+
1244
+ return data, freq
1245
+
1246
+
1247
+ def _range_from_fields(
1248
+ year=None,
1249
+ month=None,
1250
+ quarter=None,
1251
+ day=None,
1252
+ hour=None,
1253
+ minute=None,
1254
+ second=None,
1255
+ freq=None,
1256
+ ) -> tuple[np.ndarray, BaseOffset]:
1257
+ if hour is None:
1258
+ hour = 0
1259
+ if minute is None:
1260
+ minute = 0
1261
+ if second is None:
1262
+ second = 0
1263
+ if day is None:
1264
+ day = 1
1265
+
1266
+ ordinals = []
1267
+
1268
+ if quarter is not None:
1269
+ if freq is None:
1270
+ freq = to_offset("Q", is_period=True)
1271
+ base = FreqGroup.FR_QTR.value
1272
+ else:
1273
+ freq = to_offset(freq, is_period=True)
1274
+ base = libperiod.freq_to_dtype_code(freq)
1275
+ if base != FreqGroup.FR_QTR.value:
1276
+ raise AssertionError("base must equal FR_QTR")
1277
+
1278
+ freqstr = freq.freqstr
1279
+ year, quarter = _make_field_arrays(year, quarter)
1280
+ for y, q in zip(year, quarter):
1281
+ calendar_year, calendar_month = parsing.quarter_to_myear(y, q, freqstr)
1282
+ val = libperiod.period_ordinal(
1283
+ calendar_year, calendar_month, 1, 1, 1, 1, 0, 0, base
1284
+ )
1285
+ ordinals.append(val)
1286
+ else:
1287
+ freq = to_offset(freq, is_period=True)
1288
+ base = libperiod.freq_to_dtype_code(freq)
1289
+ arrays = _make_field_arrays(year, month, day, hour, minute, second)
1290
+ for y, mth, d, h, mn, s in zip(*arrays):
1291
+ ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
1292
+
1293
+ return np.array(ordinals, dtype=np.int64), freq
1294
+
1295
+
1296
+ def _make_field_arrays(*fields) -> list[np.ndarray]:
1297
+ length = None
1298
+ for x in fields:
1299
+ if isinstance(x, (list, np.ndarray, ABCSeries)):
1300
+ if length is not None and len(x) != length:
1301
+ raise ValueError("Mismatched Period array lengths")
1302
+ if length is None:
1303
+ length = len(x)
1304
+
1305
+ # error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected
1306
+ # "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int,
1307
+ # integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]"
1308
+ return [
1309
+ np.asarray(x)
1310
+ if isinstance(x, (np.ndarray, list, ABCSeries))
1311
+ else np.repeat(x, length) # type: ignore[arg-type]
1312
+ for x in fields
1313
+ ]
venv/lib/python3.10/site-packages/pandas/core/arrays/string_.py ADDED
@@ -0,0 +1,657 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ ClassVar,
6
+ Literal,
7
+ )
8
+
9
+ import numpy as np
10
+
11
+ from pandas._config import get_option
12
+
13
+ from pandas._libs import (
14
+ lib,
15
+ missing as libmissing,
16
+ )
17
+ from pandas._libs.arrays import NDArrayBacked
18
+ from pandas._libs.lib import ensure_string_array
19
+ from pandas.compat import pa_version_under10p1
20
+ from pandas.compat.numpy import function as nv
21
+ from pandas.util._decorators import doc
22
+
23
+ from pandas.core.dtypes.base import (
24
+ ExtensionDtype,
25
+ StorageExtensionDtype,
26
+ register_extension_dtype,
27
+ )
28
+ from pandas.core.dtypes.common import (
29
+ is_array_like,
30
+ is_bool_dtype,
31
+ is_integer_dtype,
32
+ is_object_dtype,
33
+ is_string_dtype,
34
+ pandas_dtype,
35
+ )
36
+
37
+ from pandas.core import ops
38
+ from pandas.core.array_algos import masked_reductions
39
+ from pandas.core.arrays.base import ExtensionArray
40
+ from pandas.core.arrays.floating import (
41
+ FloatingArray,
42
+ FloatingDtype,
43
+ )
44
+ from pandas.core.arrays.integer import (
45
+ IntegerArray,
46
+ IntegerDtype,
47
+ )
48
+ from pandas.core.arrays.numpy_ import NumpyExtensionArray
49
+ from pandas.core.construction import extract_array
50
+ from pandas.core.indexers import check_array_indexer
51
+ from pandas.core.missing import isna
52
+
53
+ if TYPE_CHECKING:
54
+ import pyarrow
55
+
56
+ from pandas._typing import (
57
+ AxisInt,
58
+ Dtype,
59
+ DtypeObj,
60
+ NumpySorter,
61
+ NumpyValueArrayLike,
62
+ Scalar,
63
+ Self,
64
+ npt,
65
+ type_t,
66
+ )
67
+
68
+ from pandas import Series
69
+
70
+
71
+ @register_extension_dtype
72
+ class StringDtype(StorageExtensionDtype):
73
+ """
74
+ Extension dtype for string data.
75
+
76
+ .. warning::
77
+
78
+ StringDtype is considered experimental. The implementation and
79
+ parts of the API may change without warning.
80
+
81
+ Parameters
82
+ ----------
83
+ storage : {"python", "pyarrow", "pyarrow_numpy"}, optional
84
+ If not given, the value of ``pd.options.mode.string_storage``.
85
+
86
+ Attributes
87
+ ----------
88
+ None
89
+
90
+ Methods
91
+ -------
92
+ None
93
+
94
+ Examples
95
+ --------
96
+ >>> pd.StringDtype()
97
+ string[python]
98
+
99
+ >>> pd.StringDtype(storage="pyarrow")
100
+ string[pyarrow]
101
+ """
102
+
103
+ # error: Cannot override instance variable (previously declared on
104
+ # base class "StorageExtensionDtype") with class variable
105
+ name: ClassVar[str] = "string" # type: ignore[misc]
106
+
107
+ #: StringDtype().na_value uses pandas.NA except the implementation that
108
+ # follows NumPy semantics, which uses nan.
109
+ @property
110
+ def na_value(self) -> libmissing.NAType | float: # type: ignore[override]
111
+ if self.storage == "pyarrow_numpy":
112
+ return np.nan
113
+ else:
114
+ return libmissing.NA
115
+
116
+ _metadata = ("storage",)
117
+
118
+ def __init__(self, storage=None) -> None:
119
+ if storage is None:
120
+ infer_string = get_option("future.infer_string")
121
+ if infer_string:
122
+ storage = "pyarrow_numpy"
123
+ else:
124
+ storage = get_option("mode.string_storage")
125
+ if storage not in {"python", "pyarrow", "pyarrow_numpy"}:
126
+ raise ValueError(
127
+ f"Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'. "
128
+ f"Got {storage} instead."
129
+ )
130
+ if storage in ("pyarrow", "pyarrow_numpy") and pa_version_under10p1:
131
+ raise ImportError(
132
+ "pyarrow>=10.0.1 is required for PyArrow backed StringArray."
133
+ )
134
+ self.storage = storage
135
+
136
+ @property
137
+ def type(self) -> type[str]:
138
+ return str
139
+
140
+ @classmethod
141
+ def construct_from_string(cls, string) -> Self:
142
+ """
143
+ Construct a StringDtype from a string.
144
+
145
+ Parameters
146
+ ----------
147
+ string : str
148
+ The type of the name. The storage type will be taken from `string`.
149
+ Valid options and their storage types are
150
+
151
+ ========================== ==============================================
152
+ string result storage
153
+ ========================== ==============================================
154
+ ``'string'`` pd.options.mode.string_storage, default python
155
+ ``'string[python]'`` python
156
+ ``'string[pyarrow]'`` pyarrow
157
+ ========================== ==============================================
158
+
159
+ Returns
160
+ -------
161
+ StringDtype
162
+
163
+ Raises
164
+ ------
165
+ TypeError
166
+ If the string is not a valid option.
167
+ """
168
+ if not isinstance(string, str):
169
+ raise TypeError(
170
+ f"'construct_from_string' expects a string, got {type(string)}"
171
+ )
172
+ if string == "string":
173
+ return cls()
174
+ elif string == "string[python]":
175
+ return cls(storage="python")
176
+ elif string == "string[pyarrow]":
177
+ return cls(storage="pyarrow")
178
+ elif string == "string[pyarrow_numpy]":
179
+ return cls(storage="pyarrow_numpy")
180
+ else:
181
+ raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
182
+
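In practice these aliases resolve through ``pandas_dtype`` (illustrative sketch, not part of the pandas source shown here; the pyarrow-backed storages additionally require pyarrow>=10.0.1):

>>> import pandas as pd
>>> pd.api.types.pandas_dtype("string[python]")
string[python]
>>> pd.api.types.pandas_dtype("string[pyarrow]")
string[pyarrow]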
183
+ # https://github.com/pandas-dev/pandas/issues/36126
184
+ # error: Signature of "construct_array_type" incompatible with supertype
185
+ # "ExtensionDtype"
186
+ def construct_array_type( # type: ignore[override]
187
+ self,
188
+ ) -> type_t[BaseStringArray]:
189
+ """
190
+ Return the array type associated with this dtype.
191
+
192
+ Returns
193
+ -------
194
+ type
195
+ """
196
+ from pandas.core.arrays.string_arrow import (
197
+ ArrowStringArray,
198
+ ArrowStringArrayNumpySemantics,
199
+ )
200
+
201
+ if self.storage == "python":
202
+ return StringArray
203
+ elif self.storage == "pyarrow":
204
+ return ArrowStringArray
205
+ else:
206
+ return ArrowStringArrayNumpySemantics
207
+
208
+ def __from_arrow__(
209
+ self, array: pyarrow.Array | pyarrow.ChunkedArray
210
+ ) -> BaseStringArray:
211
+ """
212
+ Construct StringArray from pyarrow Array/ChunkedArray.
213
+ """
214
+ if self.storage == "pyarrow":
215
+ from pandas.core.arrays.string_arrow import ArrowStringArray
216
+
217
+ return ArrowStringArray(array)
218
+ elif self.storage == "pyarrow_numpy":
219
+ from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics
220
+
221
+ return ArrowStringArrayNumpySemantics(array)
222
+ else:
223
+ import pyarrow
224
+
225
+ if isinstance(array, pyarrow.Array):
226
+ chunks = [array]
227
+ else:
228
+ # pyarrow.ChunkedArray
229
+ chunks = array.chunks
230
+
231
+ results = []
232
+ for arr in chunks:
233
+ # convert chunk by chunk to numpy and concatenate then, to avoid
234
+ # overflow for large string data when concatenating the pyarrow arrays
235
+ arr = arr.to_numpy(zero_copy_only=False)
236
+ arr = ensure_string_array(arr, na_value=libmissing.NA)
237
+ results.append(arr)
238
+
239
+ if len(chunks) == 0:
240
+ arr = np.array([], dtype=object)
241
+ else:
242
+ arr = np.concatenate(results)
243
+
244
+ # Bypass validation inside StringArray constructor, see GH#47781
245
+ new_string_array = StringArray.__new__(StringArray)
246
+ NDArrayBacked.__init__(
247
+ new_string_array,
248
+ arr,
249
+ StringDtype(storage="python"),
250
+ )
251
+ return new_string_array
252
+
253
+
254
+ class BaseStringArray(ExtensionArray):
255
+ """
256
+ Mixin class for StringArray, ArrowStringArray.
257
+ """
258
+
259
+ @doc(ExtensionArray.tolist)
260
+ def tolist(self):
261
+ if self.ndim > 1:
262
+ return [x.tolist() for x in self]
263
+ return list(self.to_numpy())
264
+
265
+ @classmethod
266
+ def _from_scalars(cls, scalars, dtype: DtypeObj) -> Self:
267
+ if lib.infer_dtype(scalars, skipna=True) not in ["string", "empty"]:
268
+ # TODO: require any NAs be valid-for-string
269
+ raise ValueError
270
+ return cls._from_sequence(scalars, dtype=dtype)
271
+
272
+
273
+ # error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
274
+ # incompatible with definition in base class "ExtensionArray"
275
+ class StringArray(BaseStringArray, NumpyExtensionArray): # type: ignore[misc]
276
+ """
277
+ Extension array for string data.
278
+
279
+ .. warning::
280
+
281
+ StringArray is considered experimental. The implementation and
282
+ parts of the API may change without warning.
283
+
284
+ Parameters
285
+ ----------
286
+ values : array-like
287
+ The array of data.
288
+
289
+ .. warning::
290
+
291
+ Currently, this expects an object-dtype ndarray
292
+ where the elements are Python strings
293
+ or nan-likes (``None``, ``np.nan``, ``NA``).
294
+ This may change without warning in the future. Use
295
+ :meth:`pandas.array` with ``dtype="string"`` for a stable way of
296
+ creating a `StringArray` from any sequence.
297
+
298
+ .. versionchanged:: 1.5.0
299
+
300
+ StringArray now accepts array-likes containing
301
+ nan-likes (``None``, ``np.nan``) for the ``values`` parameter
302
+ in addition to strings and :attr:`pandas.NA`.
303
+
304
+ copy : bool, default False
305
+ Whether to copy the array of data.
306
+
307
+ Attributes
308
+ ----------
309
+ None
310
+
311
+ Methods
312
+ -------
313
+ None
314
+
315
+ See Also
316
+ --------
317
+ :func:`pandas.array`
318
+ The recommended function for creating a StringArray.
319
+ Series.str
320
+ The string methods are available on Series backed by
321
+ a StringArray.
322
+
323
+ Notes
324
+ -----
325
+ StringArray returns a BooleanArray for comparison methods.
326
+
327
+ Examples
328
+ --------
329
+ >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string")
330
+ <StringArray>
331
+ ['This is', 'some text', <NA>, 'data.']
332
+ Length: 4, dtype: string
333
+
334
+ Unlike arrays instantiated with ``dtype="object"``, ``StringArray``
335
+ will convert the values to strings.
336
+
337
+ >>> pd.array(['1', 1], dtype="object")
338
+ <NumpyExtensionArray>
339
+ ['1', 1]
340
+ Length: 2, dtype: object
341
+ >>> pd.array(['1', 1], dtype="string")
342
+ <StringArray>
343
+ ['1', '1']
344
+ Length: 2, dtype: string
345
+
346
+ However, instantiating StringArrays directly with non-strings will raise an error.
347
+
348
+ For comparison methods, `StringArray` returns a :class:`pandas.BooleanArray`:
349
+
350
+ >>> pd.array(["a", None, "c"], dtype="string") == "a"
351
+ <BooleanArray>
352
+ [True, <NA>, False]
353
+ Length: 3, dtype: boolean
354
+ """
355
+
356
+ # undo the NumpyExtensionArray hack
357
+ _typ = "extension"
358
+
359
+ def __init__(self, values, copy: bool = False) -> None:
360
+ values = extract_array(values)
361
+
362
+ super().__init__(values, copy=copy)
363
+ if not isinstance(values, type(self)):
364
+ self._validate()
365
+ NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python"))
366
+
367
+ def _validate(self):
368
+ """Validate that we only store NA or strings."""
369
+ if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
370
+ raise ValueError("StringArray requires a sequence of strings or pandas.NA")
371
+ if self._ndarray.dtype != "object":
372
+ raise ValueError(
373
+ "StringArray requires a sequence of strings or pandas.NA. Got "
374
+ f"'{self._ndarray.dtype}' dtype instead."
375
+ )
376
+ # Check to see if we need to convert NA values to pd.NA
377
+ if self._ndarray.ndim > 2:
378
+ # Ravel if ndims > 2 b/c no cythonized version available
379
+ lib.convert_nans_to_NA(self._ndarray.ravel("K"))
380
+ else:
381
+ lib.convert_nans_to_NA(self._ndarray)
382
+
383
+ @classmethod
384
+ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
385
+ if dtype and not (isinstance(dtype, str) and dtype == "string"):
386
+ dtype = pandas_dtype(dtype)
387
+ assert isinstance(dtype, StringDtype) and dtype.storage == "python"
388
+
389
+ from pandas.core.arrays.masked import BaseMaskedArray
390
+
391
+ if isinstance(scalars, BaseMaskedArray):
392
+ # avoid costly conversion to object dtype
393
+ na_values = scalars._mask
394
+ result = scalars._data
395
+ result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
396
+ result[na_values] = libmissing.NA
397
+
398
+ else:
399
+ if lib.is_pyarrow_array(scalars):
400
+ # pyarrow array; we cannot rely on the "to_numpy" check in
401
+ # ensure_string_array because calling scalars.to_numpy would set
402
+ # zero_copy_only to True which caused problems see GH#52076
403
+ scalars = np.array(scalars)
404
+ # convert non-na-likes to str, and nan-likes to StringDtype().na_value
405
+ result = lib.ensure_string_array(scalars, na_value=libmissing.NA, copy=copy)
406
+
407
+ # Manually creating new array avoids the validation step in the __init__, so is
408
+ # faster. Refactor need for validation?
409
+ new_string_array = cls.__new__(cls)
410
+ NDArrayBacked.__init__(new_string_array, result, StringDtype(storage="python"))
411
+
412
+ return new_string_array
413
+
414
+ @classmethod
415
+ def _from_sequence_of_strings(
416
+ cls, strings, *, dtype: Dtype | None = None, copy: bool = False
417
+ ):
418
+ return cls._from_sequence(strings, dtype=dtype, copy=copy)
419
+
420
+ @classmethod
421
+ def _empty(cls, shape, dtype) -> StringArray:
422
+ values = np.empty(shape, dtype=object)
423
+ values[:] = libmissing.NA
424
+ return cls(values).astype(dtype, copy=False)
425
+
426
+ def __arrow_array__(self, type=None):
427
+ """
428
+ Convert myself into a pyarrow Array.
429
+ """
430
+ import pyarrow as pa
431
+
432
+ if type is None:
433
+ type = pa.string()
434
+
435
+ values = self._ndarray.copy()
436
+ values[self.isna()] = None
437
+ return pa.array(values, type=type, from_pandas=True)
438
+
439
+ def _values_for_factorize(self):
440
+ arr = self._ndarray.copy()
441
+ mask = self.isna()
442
+ arr[mask] = None
443
+ return arr, None
444
+
445
+ def __setitem__(self, key, value) -> None:
446
+ value = extract_array(value, extract_numpy=True)
447
+ if isinstance(value, type(self)):
448
+ # extract_array doesn't extract NumpyExtensionArray subclasses
449
+ value = value._ndarray
450
+
451
+ key = check_array_indexer(self, key)
452
+ scalar_key = lib.is_scalar(key)
453
+ scalar_value = lib.is_scalar(value)
454
+ if scalar_key and not scalar_value:
455
+ raise ValueError("setting an array element with a sequence.")
456
+
457
+ # validate new items
458
+ if scalar_value:
459
+ if isna(value):
460
+ value = libmissing.NA
461
+ elif not isinstance(value, str):
462
+ raise TypeError(
463
+ f"Cannot set non-string value '{value}' into a StringArray."
464
+ )
465
+ else:
466
+ if not is_array_like(value):
467
+ value = np.asarray(value, dtype=object)
468
+ if len(value) and not lib.is_string_array(value, skipna=True):
469
+ raise TypeError("Must provide strings.")
470
+
471
+ mask = isna(value)
472
+ if mask.any():
473
+ value = value.copy()
474
+ value[isna(value)] = libmissing.NA
475
+
476
+ super().__setitem__(key, value)
477
+
478
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
479
+ # the super() method NDArrayBackedExtensionArray._putmask uses
480
+ # np.putmask which doesn't properly handle None/pd.NA, so using the
481
+ # base class implementation that uses __setitem__
482
+ ExtensionArray._putmask(self, mask, value)
483
+
484
+ def astype(self, dtype, copy: bool = True):
485
+ dtype = pandas_dtype(dtype)
486
+
487
+ if dtype == self.dtype:
488
+ if copy:
489
+ return self.copy()
490
+ return self
491
+
492
+ elif isinstance(dtype, IntegerDtype):
493
+ arr = self._ndarray.copy()
494
+ mask = self.isna()
495
+ arr[mask] = 0
496
+ values = arr.astype(dtype.numpy_dtype)
497
+ return IntegerArray(values, mask, copy=False)
498
+ elif isinstance(dtype, FloatingDtype):
499
+ arr = self.copy()
500
+ mask = self.isna()
501
+ arr[mask] = "0"
502
+ values = arr.astype(dtype.numpy_dtype)
503
+ return FloatingArray(values, mask, copy=False)
504
+ elif isinstance(dtype, ExtensionDtype):
505
+ # Skip the NumpyExtensionArray.astype method
506
+ return ExtensionArray.astype(self, dtype, copy)
507
+ elif np.issubdtype(dtype, np.floating):
508
+ arr = self._ndarray.copy()
509
+ mask = self.isna()
510
+ arr[mask] = 0
511
+ values = arr.astype(dtype)
512
+ values[mask] = np.nan
513
+ return values
514
+
515
+ return super().astype(dtype, copy)
516
+
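A short sketch of the astype paths above (illustrative only, not part of the pandas source shown here; assumes the default "python" string storage): converting to a nullable integer dtype takes the ``IntegerDtype`` branch and keeps missing values as ``<NA>``:

>>> import pandas as pd
>>> arr = pd.array(["1", "2", pd.NA], dtype="string")
>>> arr.astype("Int64")
<IntegerArray>
[1, 2, <NA>]
Length: 3, dtype: Int64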
517
+ def _reduce(
518
+ self, name: str, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs
519
+ ):
520
+ if name in ["min", "max"]:
521
+ return getattr(self, name)(skipna=skipna, axis=axis)
522
+
523
+ raise TypeError(f"Cannot perform reduction '{name}' with string dtype")
524
+
525
+ def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
526
+ nv.validate_min((), kwargs)
527
+ result = masked_reductions.min(
528
+ values=self.to_numpy(), mask=self.isna(), skipna=skipna
529
+ )
530
+ return self._wrap_reduction_result(axis, result)
531
+
532
+ def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
533
+ nv.validate_max((), kwargs)
534
+ result = masked_reductions.max(
535
+ values=self.to_numpy(), mask=self.isna(), skipna=skipna
536
+ )
537
+ return self._wrap_reduction_result(axis, result)
538
+
539
+ def value_counts(self, dropna: bool = True) -> Series:
540
+ from pandas.core.algorithms import value_counts_internal as value_counts
541
+
542
+ result = value_counts(self._ndarray, dropna=dropna).astype("Int64")
543
+ result.index = result.index.astype(self.dtype)
544
+ return result
545
+
546
+ def memory_usage(self, deep: bool = False) -> int:
547
+ result = self._ndarray.nbytes
548
+ if deep:
549
+ return result + lib.memory_usage_of_objects(self._ndarray)
550
+ return result
551
+
552
+ @doc(ExtensionArray.searchsorted)
553
+ def searchsorted(
554
+ self,
555
+ value: NumpyValueArrayLike | ExtensionArray,
556
+ side: Literal["left", "right"] = "left",
557
+ sorter: NumpySorter | None = None,
558
+ ) -> npt.NDArray[np.intp] | np.intp:
559
+ if self._hasna:
560
+ raise ValueError(
561
+ "searchsorted requires array to be sorted, which is impossible "
562
+ "with NAs present."
563
+ )
564
+ return super().searchsorted(value=value, side=side, sorter=sorter)
565
+
566
+ def _cmp_method(self, other, op):
567
+ from pandas.arrays import BooleanArray
568
+
569
+ if isinstance(other, StringArray):
570
+ other = other._ndarray
571
+
572
+ mask = isna(self) | isna(other)
573
+ valid = ~mask
574
+
575
+ if not lib.is_scalar(other):
576
+ if len(other) != len(self):
577
+ # prevent improper broadcasting when other is 2D
578
+ raise ValueError(
579
+ f"Lengths of operands do not match: {len(self)} != {len(other)}"
580
+ )
581
+
582
+ other = np.asarray(other)
583
+ other = other[valid]
584
+
585
+ if op.__name__ in ops.ARITHMETIC_BINOPS:
586
+ result = np.empty_like(self._ndarray, dtype="object")
587
+ result[mask] = libmissing.NA
588
+ result[valid] = op(self._ndarray[valid], other)
589
+ return StringArray(result)
590
+ else:
591
+ # logical
592
+ result = np.zeros(len(self._ndarray), dtype="bool")
593
+ result[valid] = op(self._ndarray[valid], other)
594
+ return BooleanArray(result, mask)
595
+
596
+ _arith_method = _cmp_method
597
+
598
+ # ------------------------------------------------------------------------
599
+ # String methods interface
600
+ # error: Incompatible types in assignment (expression has type "NAType",
601
+ # base class "NumpyExtensionArray" defined the type as "float")
602
+ _str_na_value = libmissing.NA # type: ignore[assignment]
603
+
604
+ def _str_map(
605
+ self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
606
+ ):
607
+ from pandas.arrays import BooleanArray
608
+
609
+ if dtype is None:
610
+ dtype = StringDtype(storage="python")
611
+ if na_value is None:
612
+ na_value = self.dtype.na_value
613
+
614
+ mask = isna(self)
615
+ arr = np.asarray(self)
616
+
617
+ if is_integer_dtype(dtype) or is_bool_dtype(dtype):
618
+ constructor: type[IntegerArray | BooleanArray]
619
+ if is_integer_dtype(dtype):
620
+ constructor = IntegerArray
621
+ else:
622
+ constructor = BooleanArray
623
+
624
+ na_value_is_na = isna(na_value)
625
+ if na_value_is_na:
626
+ na_value = 1
627
+ elif dtype == np.dtype("bool"):
628
+ na_value = bool(na_value)
629
+ result = lib.map_infer_mask(
630
+ arr,
631
+ f,
632
+ mask.view("uint8"),
633
+ convert=False,
634
+ na_value=na_value,
635
+ # error: Argument 1 to "dtype" has incompatible type
636
+ # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
637
+ # "Type[object]"
638
+ dtype=np.dtype(dtype), # type: ignore[arg-type]
639
+ )
640
+
641
+ if not na_value_is_na:
642
+ mask[:] = False
643
+
644
+ return constructor(result, mask)
645
+
646
+ elif is_string_dtype(dtype) and not is_object_dtype(dtype):
647
+ # i.e. StringDtype
648
+ result = lib.map_infer_mask(
649
+ arr, f, mask.view("uint8"), convert=False, na_value=na_value
650
+ )
651
+ return StringArray(result)
652
+ else:
653
+ # This is when the result type is object. We reach this when
654
+ # -> We know the result type is truly object (e.g. .encode returns bytes
655
+ # or .findall returns a list).
656
+ # -> We don't know the result type. E.g. `.get` can return anything.
657
+ return lib.map_infer_mask(arr, f, mask.view("uint8"))
venv/lib/python3.10/site-packages/pandas/core/arrays/string_arrow.py ADDED
@@ -0,0 +1,715 @@
1
+ from __future__ import annotations
2
+
3
+ from functools import partial
4
+ import operator
5
+ import re
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Callable,
9
+ Union,
10
+ )
11
+ import warnings
12
+
13
+ import numpy as np
14
+
15
+ from pandas._libs import (
16
+ lib,
17
+ missing as libmissing,
18
+ )
19
+ from pandas.compat import (
20
+ pa_version_under10p1,
21
+ pa_version_under13p0,
22
+ )
23
+ from pandas.util._exceptions import find_stack_level
24
+
25
+ from pandas.core.dtypes.common import (
26
+ is_bool_dtype,
27
+ is_integer_dtype,
28
+ is_object_dtype,
29
+ is_scalar,
30
+ is_string_dtype,
31
+ pandas_dtype,
32
+ )
33
+ from pandas.core.dtypes.missing import isna
34
+
35
+ from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin
36
+ from pandas.core.arrays.arrow import ArrowExtensionArray
37
+ from pandas.core.arrays.boolean import BooleanDtype
38
+ from pandas.core.arrays.integer import Int64Dtype
39
+ from pandas.core.arrays.numeric import NumericDtype
40
+ from pandas.core.arrays.string_ import (
41
+ BaseStringArray,
42
+ StringDtype,
43
+ )
44
+ from pandas.core.ops import invalid_comparison
45
+ from pandas.core.strings.object_array import ObjectStringArrayMixin
46
+
47
+ if not pa_version_under10p1:
48
+ import pyarrow as pa
49
+ import pyarrow.compute as pc
50
+
51
+ from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
52
+
53
+
54
+ if TYPE_CHECKING:
55
+ from collections.abc import Sequence
56
+
57
+ from pandas._typing import (
58
+ ArrayLike,
59
+ AxisInt,
60
+ Dtype,
61
+ Scalar,
62
+ npt,
63
+ )
64
+
65
+ from pandas import Series
66
+
67
+
68
+ ArrowStringScalarOrNAT = Union[str, libmissing.NAType]
69
+
70
+
71
+ def _chk_pyarrow_available() -> None:
72
+ if pa_version_under10p1:
73
+ msg = "pyarrow>=10.0.1 is required for PyArrow backed ArrowExtensionArray."
74
+ raise ImportError(msg)
75
+
76
+
77
+ # TODO: Inherit directly from BaseStringArrayMethods. Currently we inherit from
78
+ # ObjectStringArrayMixin because we want to have the object-dtype based methods as
79
+ # fallback for the ones that pyarrow doesn't yet support
80
+
81
+
82
+ class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringArray):
83
+ """
84
+ Extension array for string data in a ``pyarrow.ChunkedArray``.
85
+
86
+ .. warning::
87
+
88
+ ArrowStringArray is considered experimental. The implementation and
89
+ parts of the API may change without warning.
90
+
91
+ Parameters
92
+ ----------
93
+ values : pyarrow.Array or pyarrow.ChunkedArray
94
+ The array of data.
95
+
96
+ Attributes
97
+ ----------
98
+ None
99
+
100
+ Methods
101
+ -------
102
+ None
103
+
104
+ See Also
105
+ --------
106
+ :func:`pandas.array`
107
+ The recommended function for creating an ArrowStringArray.
108
+ Series.str
109
+ The string methods are available on Series backed by
110
+ an ArrowStringArray.
111
+
112
+ Notes
113
+ -----
114
+ ArrowStringArray returns a BooleanArray for comparison methods.
115
+
116
+ Examples
117
+ --------
118
+ >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string[pyarrow]")
119
+ <ArrowStringArray>
120
+ ['This is', 'some text', <NA>, 'data.']
121
+ Length: 4, dtype: string
122
+ """
123
+
124
+ # error: Incompatible types in assignment (expression has type "StringDtype",
125
+ # base class "ArrowExtensionArray" defined the type as "ArrowDtype")
126
+ _dtype: StringDtype # type: ignore[assignment]
127
+ _storage = "pyarrow"
128
+
129
+ def __init__(self, values) -> None:
130
+ _chk_pyarrow_available()
131
+ if isinstance(values, (pa.Array, pa.ChunkedArray)) and pa.types.is_string(
132
+ values.type
133
+ ):
134
+ values = pc.cast(values, pa.large_string())
135
+
136
+ super().__init__(values)
137
+ self._dtype = StringDtype(storage=self._storage)
138
+
139
+ if not pa.types.is_large_string(self._pa_array.type) and not (
140
+ pa.types.is_dictionary(self._pa_array.type)
141
+ and pa.types.is_large_string(self._pa_array.type.value_type)
142
+ ):
143
+ raise ValueError(
144
+ "ArrowStringArray requires a PyArrow (chunked) array of "
145
+ "large_string type"
146
+ )
147
+
148
+ @classmethod
149
+ def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar:
150
+ pa_scalar = super()._box_pa_scalar(value, pa_type)
151
+ if pa.types.is_string(pa_scalar.type) and pa_type is None:
152
+ pa_scalar = pc.cast(pa_scalar, pa.large_string())
153
+ return pa_scalar
154
+
155
+ @classmethod
156
+ def _box_pa_array(
157
+ cls, value, pa_type: pa.DataType | None = None, copy: bool = False
158
+ ) -> pa.Array | pa.ChunkedArray:
159
+ pa_array = super()._box_pa_array(value, pa_type)
160
+ if pa.types.is_string(pa_array.type) and pa_type is None:
161
+ pa_array = pc.cast(pa_array, pa.large_string())
162
+ return pa_array
163
+
164
+ def __len__(self) -> int:
165
+ """
166
+ Length of this array.
167
+
168
+ Returns
169
+ -------
170
+ length : int
171
+ """
172
+ return len(self._pa_array)
173
+
174
+ @classmethod
175
+ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
176
+ from pandas.core.arrays.masked import BaseMaskedArray
177
+
178
+ _chk_pyarrow_available()
179
+
180
+ if dtype and not (isinstance(dtype, str) and dtype == "string"):
181
+ dtype = pandas_dtype(dtype)
182
+ assert isinstance(dtype, StringDtype) and dtype.storage in (
183
+ "pyarrow",
184
+ "pyarrow_numpy",
185
+ )
186
+
187
+ if isinstance(scalars, BaseMaskedArray):
188
+ # avoid costly conversion to object dtype in ensure_string_array and
189
+ # numerical issues with Float32Dtype
190
+ na_values = scalars._mask
191
+ result = scalars._data
192
+ result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
193
+ return cls(pa.array(result, mask=na_values, type=pa.string()))
194
+ elif isinstance(scalars, (pa.Array, pa.ChunkedArray)):
195
+ return cls(pc.cast(scalars, pa.string()))
196
+
197
+ # convert non-na-likes to str
198
+ result = lib.ensure_string_array(scalars, copy=copy)
199
+ return cls(pa.array(result, type=pa.string(), from_pandas=True))
200
+
201
+ @classmethod
202
+ def _from_sequence_of_strings(
203
+ cls, strings, dtype: Dtype | None = None, copy: bool = False
204
+ ):
205
+ return cls._from_sequence(strings, dtype=dtype, copy=copy)
206
+
207
+ @property
208
+ def dtype(self) -> StringDtype: # type: ignore[override]
209
+ """
210
+ An instance of 'string[pyarrow]'.
211
+ """
212
+ return self._dtype
213
+
214
+ def insert(self, loc: int, item) -> ArrowStringArray:
215
+ if not isinstance(item, str) and item is not libmissing.NA:
216
+ raise TypeError("Scalar must be NA or str")
217
+ return super().insert(loc, item)
218
+
219
+ @classmethod
220
+ def _result_converter(cls, values, na=None):
221
+ return BooleanDtype().__from_arrow__(values)
222
+
223
+ def _maybe_convert_setitem_value(self, value):
224
+ """Maybe convert value to be pyarrow compatible."""
225
+ if is_scalar(value):
226
+ if isna(value):
227
+ value = None
228
+ elif not isinstance(value, str):
229
+ raise TypeError("Scalar must be NA or str")
230
+ else:
231
+ value = np.array(value, dtype=object, copy=True)
232
+ value[isna(value)] = None
233
+ for v in value:
234
+ if not (v is None or isinstance(v, str)):
235
+ raise TypeError("Scalar must be NA or str")
236
+ return super()._maybe_convert_setitem_value(value)
237
+
238
+ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
239
+ value_set = [
240
+ pa_scalar.as_py()
241
+ for pa_scalar in [pa.scalar(value, from_pandas=True) for value in values]
242
+ if pa_scalar.type in (pa.string(), pa.null())
243
+ ]
244
+
245
+ # short-circuit to return all False array.
246
+ if not len(value_set):
247
+ return np.zeros(len(self), dtype=bool)
248
+
249
+ result = pc.is_in(
250
+ self._pa_array, value_set=pa.array(value_set, type=self._pa_array.type)
251
+ )
252
+ # pyarrow 2.0.0 returned nulls, so we explicitly specify dtype to convert nulls
253
+ # to False
254
+ return np.array(result, dtype=np.bool_)
255
+
256
+ def astype(self, dtype, copy: bool = True):
257
+ dtype = pandas_dtype(dtype)
258
+
259
+ if dtype == self.dtype:
260
+ if copy:
261
+ return self.copy()
262
+ return self
263
+ elif isinstance(dtype, NumericDtype):
264
+ data = self._pa_array.cast(pa.from_numpy_dtype(dtype.numpy_dtype))
265
+ return dtype.__from_arrow__(data)
266
+ elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.floating):
267
+ return self.to_numpy(dtype=dtype, na_value=np.nan)
268
+
269
+ return super().astype(dtype, copy=copy)
270
+
271
+ @property
272
+ def _data(self):
273
+ # dask accesses ._data directly
274
+ warnings.warn(
275
+ f"{type(self).__name__}._data is a deprecated and will be removed "
276
+ "in a future version, use ._pa_array instead",
277
+ FutureWarning,
278
+ stacklevel=find_stack_level(),
279
+ )
280
+ return self._pa_array
281
+
282
+ # ------------------------------------------------------------------------
283
+ # String methods interface
284
+
285
+ # error: Incompatible types in assignment (expression has type "NAType",
286
+ # base class "ObjectStringArrayMixin" defined the type as "float")
287
+ _str_na_value = libmissing.NA # type: ignore[assignment]
288
+
289
+ def _str_map(
290
+ self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
291
+ ):
292
+ # TODO: de-duplicate with StringArray method. This method is more or less copy and
293
+ # paste.
294
+
295
+ from pandas.arrays import (
296
+ BooleanArray,
297
+ IntegerArray,
298
+ )
299
+
300
+ if dtype is None:
301
+ dtype = self.dtype
302
+ if na_value is None:
303
+ na_value = self.dtype.na_value
304
+
305
+ mask = isna(self)
306
+ arr = np.asarray(self)
307
+
308
+ if is_integer_dtype(dtype) or is_bool_dtype(dtype):
309
+ constructor: type[IntegerArray | BooleanArray]
310
+ if is_integer_dtype(dtype):
311
+ constructor = IntegerArray
312
+ else:
313
+ constructor = BooleanArray
314
+
315
+ na_value_is_na = isna(na_value)
316
+ if na_value_is_na:
317
+ na_value = 1
318
+ result = lib.map_infer_mask(
319
+ arr,
320
+ f,
321
+ mask.view("uint8"),
322
+ convert=False,
323
+ na_value=na_value,
324
+ # error: Argument 1 to "dtype" has incompatible type
325
+ # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
326
+ # "Type[object]"
327
+ dtype=np.dtype(dtype), # type: ignore[arg-type]
328
+ )
329
+
330
+ if not na_value_is_na:
331
+ mask[:] = False
332
+
333
+ return constructor(result, mask)
334
+
335
+ elif is_string_dtype(dtype) and not is_object_dtype(dtype):
336
+ # i.e. StringDtype
337
+ result = lib.map_infer_mask(
338
+ arr, f, mask.view("uint8"), convert=False, na_value=na_value
339
+ )
340
+ result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True)
341
+ return type(self)(result)
342
+ else:
343
+ # This is when the result type is object. We reach this when
344
+ # -> We know the result type is truly object (e.g. .encode returns bytes
345
+ # or .findall returns a list).
346
+ # -> We don't know the result type. E.g. `.get` can return anything.
347
+ return lib.map_infer_mask(arr, f, mask.view("uint8"))
348
+
349
+ def _str_contains(
350
+ self, pat, case: bool = True, flags: int = 0, na=np.nan, regex: bool = True
351
+ ):
352
+ if flags:
353
+ fallback_performancewarning()
354
+ return super()._str_contains(pat, case, flags, na, regex)
355
+
356
+ if regex:
357
+ result = pc.match_substring_regex(self._pa_array, pat, ignore_case=not case)
358
+ else:
359
+ result = pc.match_substring(self._pa_array, pat, ignore_case=not case)
360
+ result = self._result_converter(result, na=na)
361
+ if not isna(na):
362
+ result[isna(result)] = bool(na)
363
+ return result
364
+
365
+ def _str_startswith(self, pat: str | tuple[str, ...], na: Scalar | None = None):
366
+ if isinstance(pat, str):
367
+ result = pc.starts_with(self._pa_array, pattern=pat)
368
+ else:
369
+ if len(pat) == 0:
370
+ # mimic existing behaviour of string extension array
371
+ # and python string method
372
+ result = pa.array(
373
+ np.zeros(len(self._pa_array), dtype=bool), mask=isna(self._pa_array)
374
+ )
375
+ else:
376
+ result = pc.starts_with(self._pa_array, pattern=pat[0])
377
+
378
+ for p in pat[1:]:
379
+ result = pc.or_(result, pc.starts_with(self._pa_array, pattern=p))
380
+ if not isna(na):
381
+ result = result.fill_null(na)
382
+ return self._result_converter(result)
383
+
384
+ def _str_endswith(self, pat: str | tuple[str, ...], na: Scalar | None = None):
385
+ if isinstance(pat, str):
386
+ result = pc.ends_with(self._pa_array, pattern=pat)
387
+ else:
388
+ if len(pat) == 0:
389
+ # mimic existing behaviour of string extension array
390
+ # and python string method
391
+ result = pa.array(
392
+ np.zeros(len(self._pa_array), dtype=bool), mask=isna(self._pa_array)
393
+ )
394
+ else:
395
+ result = pc.ends_with(self._pa_array, pattern=pat[0])
396
+
397
+ for p in pat[1:]:
398
+ result = pc.or_(result, pc.ends_with(self._pa_array, pattern=p))
399
+ if not isna(na):
400
+ result = result.fill_null(na)
401
+ return self._result_converter(result)
402
+
403
+ def _str_replace(
404
+ self,
405
+ pat: str | re.Pattern,
406
+ repl: str | Callable,
407
+ n: int = -1,
408
+ case: bool = True,
409
+ flags: int = 0,
410
+ regex: bool = True,
411
+ ):
412
+ if isinstance(pat, re.Pattern) or callable(repl) or not case or flags:
413
+ fallback_performancewarning()
414
+ return super()._str_replace(pat, repl, n, case, flags, regex)
415
+
416
+ func = pc.replace_substring_regex if regex else pc.replace_substring
417
+ result = func(self._pa_array, pattern=pat, replacement=repl, max_replacements=n)
418
+ return type(self)(result)
419
+
420
+ def _str_repeat(self, repeats: int | Sequence[int]):
421
+ if not isinstance(repeats, int):
422
+ return super()._str_repeat(repeats)
423
+ else:
424
+ return type(self)(pc.binary_repeat(self._pa_array, repeats))
425
+
426
+ def _str_match(
427
+ self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
428
+ ):
429
+ if not pat.startswith("^"):
430
+ pat = f"^{pat}"
431
+ return self._str_contains(pat, case, flags, na, regex=True)
432
+
433
+ def _str_fullmatch(
434
+ self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None
435
+ ):
436
+ if not pat.endswith("$") or pat.endswith("\\$"):
437
+ pat = f"{pat}$"
438
+ return self._str_match(pat, case, flags, na)
439
+
440
+ def _str_slice(
441
+ self, start: int | None = None, stop: int | None = None, step: int | None = None
442
+ ):
443
+ if stop is None:
444
+ return super()._str_slice(start, stop, step)
445
+ if start is None:
446
+ start = 0
447
+ if step is None:
448
+ step = 1
449
+ return type(self)(
450
+ pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step)
451
+ )
452
+
453
+ def _str_isalnum(self):
454
+ result = pc.utf8_is_alnum(self._pa_array)
455
+ return self._result_converter(result)
456
+
457
+ def _str_isalpha(self):
458
+ result = pc.utf8_is_alpha(self._pa_array)
459
+ return self._result_converter(result)
460
+
461
+ def _str_isdecimal(self):
462
+ result = pc.utf8_is_decimal(self._pa_array)
463
+ return self._result_converter(result)
464
+
465
+ def _str_isdigit(self):
466
+ result = pc.utf8_is_digit(self._pa_array)
467
+ return self._result_converter(result)
468
+
469
+ def _str_islower(self):
470
+ result = pc.utf8_is_lower(self._pa_array)
471
+ return self._result_converter(result)
472
+
473
+ def _str_isnumeric(self):
474
+ result = pc.utf8_is_numeric(self._pa_array)
475
+ return self._result_converter(result)
476
+
477
+ def _str_isspace(self):
478
+ result = pc.utf8_is_space(self._pa_array)
479
+ return self._result_converter(result)
480
+
481
+ def _str_istitle(self):
482
+ result = pc.utf8_is_title(self._pa_array)
483
+ return self._result_converter(result)
484
+
485
+ def _str_isupper(self):
486
+ result = pc.utf8_is_upper(self._pa_array)
487
+ return self._result_converter(result)
488
+
489
+ def _str_len(self):
490
+ result = pc.utf8_length(self._pa_array)
491
+ return self._convert_int_dtype(result)
492
+
493
+ def _str_lower(self):
494
+ return type(self)(pc.utf8_lower(self._pa_array))
495
+
496
+ def _str_upper(self):
497
+ return type(self)(pc.utf8_upper(self._pa_array))
498
+
499
+ def _str_strip(self, to_strip=None):
500
+ if to_strip is None:
501
+ result = pc.utf8_trim_whitespace(self._pa_array)
502
+ else:
503
+ result = pc.utf8_trim(self._pa_array, characters=to_strip)
504
+ return type(self)(result)
505
+
506
+ def _str_lstrip(self, to_strip=None):
507
+ if to_strip is None:
508
+ result = pc.utf8_ltrim_whitespace(self._pa_array)
509
+ else:
510
+ result = pc.utf8_ltrim(self._pa_array, characters=to_strip)
511
+ return type(self)(result)
512
+
513
+ def _str_rstrip(self, to_strip=None):
514
+ if to_strip is None:
515
+ result = pc.utf8_rtrim_whitespace(self._pa_array)
516
+ else:
517
+ result = pc.utf8_rtrim(self._pa_array, characters=to_strip)
518
+ return type(self)(result)
519
+
520
+ def _str_removeprefix(self, prefix: str):
521
+ if not pa_version_under13p0:
522
+ starts_with = pc.starts_with(self._pa_array, pattern=prefix)
523
+ removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix))
524
+ result = pc.if_else(starts_with, removed, self._pa_array)
525
+ return type(self)(result)
526
+ return super()._str_removeprefix(prefix)
527
+
528
+ def _str_removesuffix(self, suffix: str):
529
+ ends_with = pc.ends_with(self._pa_array, pattern=suffix)
530
+ removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix))
531
+ result = pc.if_else(ends_with, removed, self._pa_array)
532
+ return type(self)(result)
533
+
534
+ def _str_count(self, pat: str, flags: int = 0):
535
+ if flags:
536
+ return super()._str_count(pat, flags)
537
+ result = pc.count_substring_regex(self._pa_array, pat)
538
+ return self._convert_int_dtype(result)
539
+
540
+ def _str_find(self, sub: str, start: int = 0, end: int | None = None):
541
+ if start != 0 and end is not None:
542
+ slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end)
543
+ result = pc.find_substring(slices, sub)
544
+ not_found = pc.equal(result, -1)
545
+ offset_result = pc.add(result, end - start)
546
+ result = pc.if_else(not_found, result, offset_result)
547
+ elif start == 0 and end is None:
548
+ slices = self._pa_array
549
+ result = pc.find_substring(slices, sub)
550
+ else:
551
+ return super()._str_find(sub, start, end)
552
+ return self._convert_int_dtype(result)
553
+
554
+ def _str_get_dummies(self, sep: str = "|"):
555
+ dummies_pa, labels = ArrowExtensionArray(self._pa_array)._str_get_dummies(sep)
556
+ if len(labels) == 0:
557
+ return np.empty(shape=(0, 0), dtype=np.int64), labels
558
+ dummies = np.vstack(dummies_pa.to_numpy())
559
+ return dummies.astype(np.int64, copy=False), labels
560
+
561
+ def _convert_int_dtype(self, result):
562
+ return Int64Dtype().__from_arrow__(result)
563
+
564
+ def _reduce(
565
+ self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
566
+ ):
567
+ result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs)
568
+ if name in ("argmin", "argmax") and isinstance(result, pa.Array):
569
+ return self._convert_int_dtype(result)
570
+ elif isinstance(result, pa.Array):
571
+ return type(self)(result)
572
+ else:
573
+ return result
574
+
575
+ def _rank(
576
+ self,
577
+ *,
578
+ axis: AxisInt = 0,
579
+ method: str = "average",
580
+ na_option: str = "keep",
581
+ ascending: bool = True,
582
+ pct: bool = False,
583
+ ):
584
+ """
585
+ See Series.rank.__doc__.
586
+ """
587
+ return self._convert_int_dtype(
588
+ self._rank_calc(
589
+ axis=axis,
590
+ method=method,
591
+ na_option=na_option,
592
+ ascending=ascending,
593
+ pct=pct,
594
+ )
595
+ )
596
+
597
+
598
+ class ArrowStringArrayNumpySemantics(ArrowStringArray):
599
+ _storage = "pyarrow_numpy"
600
+
601
+ @classmethod
602
+ def _result_converter(cls, values, na=None):
603
+ if not isna(na):
604
+ values = values.fill_null(bool(na))
605
+ return ArrowExtensionArray(values).to_numpy(na_value=np.nan)
606
+
607
+ def __getattribute__(self, item):
608
+ # ArrowStringArray and we both inherit from ArrowExtensionArray, which
609
+ # creates inheritance problems (Diamond inheritance)
610
+ if item in ArrowStringArrayMixin.__dict__ and item not in (
611
+ "_pa_array",
612
+ "__dict__",
613
+ ):
614
+ return partial(getattr(ArrowStringArrayMixin, item), self)
615
+ return super().__getattribute__(item)
616
+
617
+ def _str_map(
618
+ self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
619
+ ):
620
+ if dtype is None:
621
+ dtype = self.dtype
622
+ if na_value is None:
623
+ na_value = self.dtype.na_value
624
+
625
+ mask = isna(self)
626
+ arr = np.asarray(self)
627
+
628
+ if is_integer_dtype(dtype) or is_bool_dtype(dtype):
629
+ if is_integer_dtype(dtype):
630
+ na_value = np.nan
631
+ else:
632
+ na_value = False
633
+ try:
634
+ result = lib.map_infer_mask(
635
+ arr,
636
+ f,
637
+ mask.view("uint8"),
638
+ convert=False,
639
+ na_value=na_value,
640
+ dtype=np.dtype(dtype), # type: ignore[arg-type]
641
+ )
642
+ return result
643
+
644
+ except ValueError:
645
+ result = lib.map_infer_mask(
646
+ arr,
647
+ f,
648
+ mask.view("uint8"),
649
+ convert=False,
650
+ na_value=na_value,
651
+ )
652
+ if convert and result.dtype == object:
653
+ result = lib.maybe_convert_objects(result)
654
+ return result
655
+
656
+ elif is_string_dtype(dtype) and not is_object_dtype(dtype):
657
+ # i.e. StringDtype
658
+ result = lib.map_infer_mask(
659
+ arr, f, mask.view("uint8"), convert=False, na_value=na_value
660
+ )
661
+ result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True)
662
+ return type(self)(result)
663
+ else:
664
+ # This is when the result type is object. We reach this when
665
+ # -> We know the result type is truly object (e.g. .encode returns bytes
666
+ # or .findall returns a list).
667
+ # -> We don't know the result type. E.g. `.get` can return anything.
668
+ return lib.map_infer_mask(arr, f, mask.view("uint8"))
669
+
670
+ def _convert_int_dtype(self, result):
671
+ if isinstance(result, pa.Array):
672
+ result = result.to_numpy(zero_copy_only=False)
673
+ else:
674
+ result = result.to_numpy()
675
+ if result.dtype == np.int32:
676
+ result = result.astype(np.int64)
677
+ return result
678
+
679
+ def _cmp_method(self, other, op):
680
+ try:
681
+ result = super()._cmp_method(other, op)
682
+ except pa.ArrowNotImplementedError:
683
+ return invalid_comparison(self, other, op)
684
+ if op == operator.ne:
685
+ return result.to_numpy(np.bool_, na_value=True)
686
+ else:
687
+ return result.to_numpy(np.bool_, na_value=False)
688
+
689
+ def value_counts(self, dropna: bool = True) -> Series:
690
+ from pandas import Series
691
+
692
+ result = super().value_counts(dropna)
693
+ return Series(
694
+ result._values.to_numpy(), index=result.index, name=result.name, copy=False
695
+ )
696
+
697
+ def _reduce(
698
+ self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
699
+ ):
700
+ if name in ["any", "all"]:
701
+ if not skipna and name == "all":
702
+ nas = pc.invert(pc.is_null(self._pa_array))
703
+ arr = pc.and_kleene(nas, pc.not_equal(self._pa_array, ""))
704
+ else:
705
+ arr = pc.not_equal(self._pa_array, "")
706
+ return ArrowExtensionArray(arr)._reduce(
707
+ name, skipna=skipna, keepdims=keepdims, **kwargs
708
+ )
709
+ else:
710
+ return super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs)
711
+
712
+ def insert(self, loc: int, item) -> ArrowStringArrayNumpySemantics:
713
+ if item is np.nan:
714
+ item = libmissing.NA
715
+ return super().insert(loc, item) # type: ignore[return-value]
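Note: the ArrowStringArray / ArrowStringArrayNumpySemantics classes in the file above are not usually instantiated directly; per the class docstring they are reached through pd.array with the "string[pyarrow]" dtype. A minimal usage sketch, not part of the commit, assuming pandas with the optional pyarrow dependency (>= 10.0.1) installed:

import pandas as pd

# pyarrow-backed string array; requires pyarrow >= 10.0.1 to be importable
arr = pd.array(["This is", "some text", None, "data."], dtype="string[pyarrow]")

# String methods on a Series backed by this array dispatch to pyarrow.compute
# where possible (e.g. .str.startswith maps to pc.starts_with in the code above).
ser = pd.Series(arr)
print(ser.str.startswith("some"))  # boolean extension array with <NA> preserved
print(ser.str.len())               # Int64 result via _convert_int_dtype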
venv/lib/python3.10/site-packages/pandas/core/arrays/timedeltas.py ADDED
@@ -0,0 +1,1177 @@
1
+ from __future__ import annotations
2
+
3
+ from datetime import timedelta
4
+ import operator
5
+ from typing import (
6
+ TYPE_CHECKING,
7
+ cast,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas._libs import (
13
+ lib,
14
+ tslibs,
15
+ )
16
+ from pandas._libs.tslibs import (
17
+ NaT,
18
+ NaTType,
19
+ Tick,
20
+ Timedelta,
21
+ astype_overflowsafe,
22
+ get_supported_dtype,
23
+ iNaT,
24
+ is_supported_dtype,
25
+ periods_per_second,
26
+ )
27
+ from pandas._libs.tslibs.conversion import cast_from_unit_vectorized
28
+ from pandas._libs.tslibs.fields import (
29
+ get_timedelta_days,
30
+ get_timedelta_field,
31
+ )
32
+ from pandas._libs.tslibs.timedeltas import (
33
+ array_to_timedelta64,
34
+ floordiv_object_array,
35
+ ints_to_pytimedelta,
36
+ parse_timedelta_unit,
37
+ truediv_object_array,
38
+ )
39
+ from pandas.compat.numpy import function as nv
40
+ from pandas.util._validators import validate_endpoints
41
+
42
+ from pandas.core.dtypes.common import (
43
+ TD64NS_DTYPE,
44
+ is_float_dtype,
45
+ is_integer_dtype,
46
+ is_object_dtype,
47
+ is_scalar,
48
+ is_string_dtype,
49
+ pandas_dtype,
50
+ )
51
+ from pandas.core.dtypes.dtypes import ExtensionDtype
52
+ from pandas.core.dtypes.missing import isna
53
+
54
+ from pandas.core import (
55
+ nanops,
56
+ roperator,
57
+ )
58
+ from pandas.core.array_algos import datetimelike_accumulations
59
+ from pandas.core.arrays import datetimelike as dtl
60
+ from pandas.core.arrays._ranges import generate_regular_range
61
+ import pandas.core.common as com
62
+ from pandas.core.ops.common import unpack_zerodim_and_defer
63
+
64
+ if TYPE_CHECKING:
65
+ from collections.abc import Iterator
66
+
67
+ from pandas._typing import (
68
+ AxisInt,
69
+ DateTimeErrorChoices,
70
+ DtypeObj,
71
+ NpDtype,
72
+ Self,
73
+ npt,
74
+ )
75
+
76
+ from pandas import DataFrame
77
+
78
+ import textwrap
79
+
80
+
81
+ def _field_accessor(name: str, alias: str, docstring: str):
82
+ def f(self) -> np.ndarray:
83
+ values = self.asi8
84
+ if alias == "days":
85
+ result = get_timedelta_days(values, reso=self._creso)
86
+ else:
87
+ # error: Incompatible types in assignment (
88
+ # expression has type "ndarray[Any, dtype[signedinteger[_32Bit]]]",
89
+ # variable has type "ndarray[Any, dtype[signedinteger[_64Bit]]]
90
+ result = get_timedelta_field(values, alias, reso=self._creso) # type: ignore[assignment]
91
+ if self._hasna:
92
+ result = self._maybe_mask_results(
93
+ result, fill_value=None, convert="float64"
94
+ )
95
+
96
+ return result
97
+
98
+ f.__name__ = name
99
+ f.__doc__ = f"\n{docstring}\n"
100
+ return property(f)
101
+
102
+
103
+ class TimedeltaArray(dtl.TimelikeOps):
104
+ """
105
+ Pandas ExtensionArray for timedelta data.
106
+
107
+ .. warning::
108
+
109
+ TimedeltaArray is currently experimental, and its API may change
110
+ without warning. In particular, :attr:`TimedeltaArray.dtype` is
111
+ expected to change to be an instance of an ``ExtensionDtype``
112
+ subclass.
113
+
114
+ Parameters
115
+ ----------
116
+ values : array-like
117
+ The timedelta data.
118
+
119
+ dtype : numpy.dtype
120
+ Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted.
121
+ freq : Offset, optional
122
+ copy : bool, default False
123
+ Whether to copy the underlying array of data.
124
+
125
+ Attributes
126
+ ----------
127
+ None
128
+
129
+ Methods
130
+ -------
131
+ None
132
+
133
+ Examples
134
+ --------
135
+ >>> pd.arrays.TimedeltaArray._from_sequence(pd.TimedeltaIndex(['1h', '2h']))
136
+ <TimedeltaArray>
137
+ ['0 days 01:00:00', '0 days 02:00:00']
138
+ Length: 2, dtype: timedelta64[ns]
139
+ """
140
+
141
+ _typ = "timedeltaarray"
142
+ _internal_fill_value = np.timedelta64("NaT", "ns")
143
+ _recognized_scalars = (timedelta, np.timedelta64, Tick)
144
+ _is_recognized_dtype = lambda x: lib.is_np_dtype(x, "m")
145
+ _infer_matches = ("timedelta", "timedelta64")
146
+
147
+ @property
148
+ def _scalar_type(self) -> type[Timedelta]:
149
+ return Timedelta
150
+
151
+ __array_priority__ = 1000
152
+ # define my properties & methods for delegation
153
+ _other_ops: list[str] = []
154
+ _bool_ops: list[str] = []
155
+ _object_ops: list[str] = ["freq"]
156
+ _field_ops: list[str] = ["days", "seconds", "microseconds", "nanoseconds"]
157
+ _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + ["unit"]
158
+ _datetimelike_methods: list[str] = [
159
+ "to_pytimedelta",
160
+ "total_seconds",
161
+ "round",
162
+ "floor",
163
+ "ceil",
164
+ "as_unit",
165
+ ]
166
+
167
+ # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
168
+ # operates pointwise.
169
+
170
+ def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType:
171
+ y = x.view("i8")
172
+ if y == NaT._value:
173
+ return NaT
174
+ return Timedelta._from_value_and_reso(y, reso=self._creso)
175
+
176
+ @property
177
+ # error: Return type "dtype" of "dtype" incompatible with return type
178
+ # "ExtensionDtype" in supertype "ExtensionArray"
179
+ def dtype(self) -> np.dtype[np.timedelta64]: # type: ignore[override]
180
+ """
181
+ The dtype for the TimedeltaArray.
182
+
183
+ .. warning::
184
+
185
+ A future version of pandas will change dtype to be an instance
186
+ of a :class:`pandas.api.extensions.ExtensionDtype` subclass,
187
+ not a ``numpy.dtype``.
188
+
189
+ Returns
190
+ -------
191
+ numpy.dtype
192
+ """
193
+ return self._ndarray.dtype
194
+
195
+ # ----------------------------------------------------------------
196
+ # Constructors
197
+
198
+ _freq = None
199
+ _default_dtype = TD64NS_DTYPE # used in TimeLikeOps.__init__
200
+
201
+ @classmethod
202
+ def _validate_dtype(cls, values, dtype):
203
+ # used in TimeLikeOps.__init__
204
+ dtype = _validate_td64_dtype(dtype)
205
+ _validate_td64_dtype(values.dtype)
206
+ if dtype != values.dtype:
207
+ raise ValueError("Values resolution does not match dtype.")
208
+ return dtype
209
+
210
+ # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
211
+ @classmethod
212
+ def _simple_new( # type: ignore[override]
213
+ cls,
214
+ values: npt.NDArray[np.timedelta64],
215
+ freq: Tick | None = None,
216
+ dtype: np.dtype[np.timedelta64] = TD64NS_DTYPE,
217
+ ) -> Self:
218
+ # Require td64 dtype, not unit-less, matching values.dtype
219
+ assert lib.is_np_dtype(dtype, "m")
220
+ assert not tslibs.is_unitless(dtype)
221
+ assert isinstance(values, np.ndarray), type(values)
222
+ assert dtype == values.dtype
223
+ assert freq is None or isinstance(freq, Tick)
224
+
225
+ result = super()._simple_new(values=values, dtype=dtype)
226
+ result._freq = freq
227
+ return result
228
+
229
+ @classmethod
230
+ def _from_sequence(cls, data, *, dtype=None, copy: bool = False) -> Self:
231
+ if dtype:
232
+ dtype = _validate_td64_dtype(dtype)
233
+
234
+ data, freq = sequence_to_td64ns(data, copy=copy, unit=None)
235
+
236
+ if dtype is not None:
237
+ data = astype_overflowsafe(data, dtype=dtype, copy=False)
238
+
239
+ return cls._simple_new(data, dtype=data.dtype, freq=freq)
240
+
241
+ @classmethod
242
+ def _from_sequence_not_strict(
243
+ cls,
244
+ data,
245
+ *,
246
+ dtype=None,
247
+ copy: bool = False,
248
+ freq=lib.no_default,
249
+ unit=None,
250
+ ) -> Self:
251
+ """
252
+ _from_sequence_not_strict but without responsibility for finding the
253
+ result's `freq`.
254
+ """
255
+ if dtype:
256
+ dtype = _validate_td64_dtype(dtype)
257
+
258
+ assert unit not in ["Y", "y", "M"] # caller is responsible for checking
259
+
260
+ data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)
261
+
262
+ if dtype is not None:
263
+ data = astype_overflowsafe(data, dtype=dtype, copy=False)
264
+
265
+ result = cls._simple_new(data, dtype=data.dtype, freq=inferred_freq)
266
+
267
+ result._maybe_pin_freq(freq, {})
268
+ return result
269
+
270
+ @classmethod
271
+ def _generate_range(
272
+ cls, start, end, periods, freq, closed=None, *, unit: str | None = None
273
+ ) -> Self:
274
+ periods = dtl.validate_periods(periods)
275
+ if freq is None and any(x is None for x in [periods, start, end]):
276
+ raise ValueError("Must provide freq argument if no data is supplied")
277
+
278
+ if com.count_not_none(start, end, periods, freq) != 3:
279
+ raise ValueError(
280
+ "Of the four parameters: start, end, periods, "
281
+ "and freq, exactly three must be specified"
282
+ )
283
+
284
+ if start is not None:
285
+ start = Timedelta(start).as_unit("ns")
286
+
287
+ if end is not None:
288
+ end = Timedelta(end).as_unit("ns")
289
+
290
+ if unit is not None:
291
+ if unit not in ["s", "ms", "us", "ns"]:
292
+ raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'")
293
+ else:
294
+ unit = "ns"
295
+
296
+ if start is not None and unit is not None:
297
+ start = start.as_unit(unit, round_ok=False)
298
+ if end is not None and unit is not None:
299
+ end = end.as_unit(unit, round_ok=False)
300
+
301
+ left_closed, right_closed = validate_endpoints(closed)
302
+
303
+ if freq is not None:
304
+ index = generate_regular_range(start, end, periods, freq, unit=unit)
305
+ else:
306
+ index = np.linspace(start._value, end._value, periods).astype("i8")
307
+
308
+ if not left_closed:
309
+ index = index[1:]
310
+ if not right_closed:
311
+ index = index[:-1]
312
+
313
+ td64values = index.view(f"m8[{unit}]")
314
+ return cls._simple_new(td64values, dtype=td64values.dtype, freq=freq)
315
+
316
+ # ----------------------------------------------------------------
317
+ # DatetimeLike Interface
318
+
319
+ def _unbox_scalar(self, value) -> np.timedelta64:
320
+ if not isinstance(value, self._scalar_type) and value is not NaT:
321
+ raise ValueError("'value' should be a Timedelta.")
322
+ self._check_compatible_with(value)
323
+ if value is NaT:
324
+ return np.timedelta64(value._value, self.unit)
325
+ else:
326
+ return value.as_unit(self.unit).asm8
327
+
328
+ def _scalar_from_string(self, value) -> Timedelta | NaTType:
329
+ return Timedelta(value)
330
+
331
+ def _check_compatible_with(self, other) -> None:
332
+ # we don't have anything to validate.
333
+ pass
334
+
335
+ # ----------------------------------------------------------------
336
+ # Array-Like / EA-Interface Methods
337
+
338
+ def astype(self, dtype, copy: bool = True):
339
+ # We handle
340
+ # --> timedelta64[ns]
341
+ # --> timedelta64
342
+ # DatetimeLikeArrayMixin super call handles other cases
343
+ dtype = pandas_dtype(dtype)
344
+
345
+ if lib.is_np_dtype(dtype, "m"):
346
+ if dtype == self.dtype:
347
+ if copy:
348
+ return self.copy()
349
+ return self
350
+
351
+ if is_supported_dtype(dtype):
352
+ # unit conversion e.g. timedelta64[s]
353
+ res_values = astype_overflowsafe(self._ndarray, dtype, copy=False)
354
+ return type(self)._simple_new(
355
+ res_values, dtype=res_values.dtype, freq=self.freq
356
+ )
357
+ else:
358
+ raise ValueError(
359
+ f"Cannot convert from {self.dtype} to {dtype}. "
360
+ "Supported resolutions are 's', 'ms', 'us', 'ns'"
361
+ )
362
+
363
+ return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy)
364
+
365
+ def __iter__(self) -> Iterator:
366
+ if self.ndim > 1:
367
+ for i in range(len(self)):
368
+ yield self[i]
369
+ else:
370
+ # convert in chunks of 10k for efficiency
371
+ data = self._ndarray
372
+ length = len(self)
373
+ chunksize = 10000
374
+ chunks = (length // chunksize) + 1
375
+ for i in range(chunks):
376
+ start_i = i * chunksize
377
+ end_i = min((i + 1) * chunksize, length)
378
+ converted = ints_to_pytimedelta(data[start_i:end_i], box=True)
379
+ yield from converted
380
+
381
+ # ----------------------------------------------------------------
382
+ # Reductions
383
+
384
+ def sum(
385
+ self,
386
+ *,
387
+ axis: AxisInt | None = None,
388
+ dtype: NpDtype | None = None,
389
+ out=None,
390
+ keepdims: bool = False,
391
+ initial=None,
392
+ skipna: bool = True,
393
+ min_count: int = 0,
394
+ ):
395
+ nv.validate_sum(
396
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims, "initial": initial}
397
+ )
398
+
399
+ result = nanops.nansum(
400
+ self._ndarray, axis=axis, skipna=skipna, min_count=min_count
401
+ )
402
+ return self._wrap_reduction_result(axis, result)
403
+
404
+ def std(
405
+ self,
406
+ *,
407
+ axis: AxisInt | None = None,
408
+ dtype: NpDtype | None = None,
409
+ out=None,
410
+ ddof: int = 1,
411
+ keepdims: bool = False,
412
+ skipna: bool = True,
413
+ ):
414
+ nv.validate_stat_ddof_func(
415
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
416
+ )
417
+
418
+ result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
419
+ if axis is None or self.ndim == 1:
420
+ return self._box_func(result)
421
+ return self._from_backing_data(result)
422
+
423
+ # ----------------------------------------------------------------
424
+ # Accumulations
425
+
426
+ def _accumulate(self, name: str, *, skipna: bool = True, **kwargs):
427
+ if name == "cumsum":
428
+ op = getattr(datetimelike_accumulations, name)
429
+ result = op(self._ndarray.copy(), skipna=skipna, **kwargs)
430
+
431
+ return type(self)._simple_new(result, freq=None, dtype=self.dtype)
432
+ elif name == "cumprod":
433
+ raise TypeError("cumprod not supported for Timedelta.")
434
+
435
+ else:
436
+ return super()._accumulate(name, skipna=skipna, **kwargs)
437
+
438
+ # ----------------------------------------------------------------
439
+ # Rendering Methods
440
+
441
+ def _formatter(self, boxed: bool = False):
442
+ from pandas.io.formats.format import get_format_timedelta64
443
+
444
+ return get_format_timedelta64(self, box=True)
445
+
446
+ def _format_native_types(
447
+ self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
448
+ ) -> npt.NDArray[np.object_]:
449
+ from pandas.io.formats.format import get_format_timedelta64
450
+
451
+ # Relies on Timedelta._repr_base
452
+ formatter = get_format_timedelta64(self, na_rep)
453
+ # equiv: np.array([formatter(x) for x in self._ndarray])
454
+ # but independent of dimension
455
+ return np.frompyfunc(formatter, 1, 1)(self._ndarray)
456
+
457
+ # ----------------------------------------------------------------
458
+ # Arithmetic Methods
459
+
460
+ def _add_offset(self, other):
461
+ assert not isinstance(other, Tick)
462
+ raise TypeError(
463
+ f"cannot add the type {type(other).__name__} to a {type(self).__name__}"
464
+ )
465
+
466
+ @unpack_zerodim_and_defer("__mul__")
467
+ def __mul__(self, other) -> Self:
468
+ if is_scalar(other):
469
+ # numpy will accept float and int, raise TypeError for others
470
+ result = self._ndarray * other
471
+ freq = None
472
+ if self.freq is not None and not isna(other):
473
+ freq = self.freq * other
474
+ if freq.n == 0:
475
+ # GH#51575 Better to have no freq than an incorrect one
476
+ freq = None
477
+ return type(self)._simple_new(result, dtype=result.dtype, freq=freq)
478
+
479
+ if not hasattr(other, "dtype"):
480
+ # list, tuple
481
+ other = np.array(other)
482
+ if len(other) != len(self) and not lib.is_np_dtype(other.dtype, "m"):
483
+ # Exclude timedelta64 here so we correctly raise TypeError
484
+ # for that instead of ValueError
485
+ raise ValueError("Cannot multiply with unequal lengths")
486
+
487
+ if is_object_dtype(other.dtype):
488
+ # this multiplication will succeed only if all elements of other
489
+ # are int or float scalars, so we will end up with
490
+ # timedelta64[ns]-dtyped result
491
+ arr = self._ndarray
492
+ result = [arr[n] * other[n] for n in range(len(self))]
493
+ result = np.array(result)
494
+ return type(self)._simple_new(result, dtype=result.dtype)
495
+
496
+ # numpy will accept float or int dtype, raise TypeError for others
497
+ result = self._ndarray * other
498
+ return type(self)._simple_new(result, dtype=result.dtype)
499
+
500
+ __rmul__ = __mul__
501
+
502
+ def _scalar_divlike_op(self, other, op):
503
+ """
504
+ Shared logic for __truediv__, __rtruediv__, __floordiv__, __rfloordiv__
505
+ with scalar 'other'.
506
+ """
507
+ if isinstance(other, self._recognized_scalars):
508
+ other = Timedelta(other)
509
+ # mypy assumes that __new__ returns an instance of the class
510
+ # github.com/python/mypy/issues/1020
511
+ if cast("Timedelta | NaTType", other) is NaT:
512
+ # specifically timedelta64-NaT
513
+ res = np.empty(self.shape, dtype=np.float64)
514
+ res.fill(np.nan)
515
+ return res
516
+
517
+ # otherwise, dispatch to Timedelta implementation
518
+ return op(self._ndarray, other)
519
+
520
+ else:
521
+ # caller is responsible for checking lib.is_scalar(other)
522
+ # assume other is numeric, otherwise numpy will raise
523
+
524
+ if op in [roperator.rtruediv, roperator.rfloordiv]:
525
+ raise TypeError(
526
+ f"Cannot divide {type(other).__name__} by {type(self).__name__}"
527
+ )
528
+
529
+ result = op(self._ndarray, other)
530
+ freq = None
531
+
532
+ if self.freq is not None:
533
+ # Note: freq gets division, not floor-division, even if op
534
+ # is floordiv.
535
+ freq = self.freq / other
536
+ if freq.nanos == 0 and self.freq.nanos != 0:
537
+ # e.g. if self.freq is Nano(1) then dividing by 2
538
+ # rounds down to zero
539
+ freq = None
540
+
541
+ return type(self)._simple_new(result, dtype=result.dtype, freq=freq)
542
+
543
+ def _cast_divlike_op(self, other):
544
+ if not hasattr(other, "dtype"):
545
+ # e.g. list, tuple
546
+ other = np.array(other)
547
+
548
+ if len(other) != len(self):
549
+ raise ValueError("Cannot divide vectors with unequal lengths")
550
+ return other
551
+
552
+ def _vector_divlike_op(self, other, op) -> np.ndarray | Self:
553
+ """
554
+ Shared logic for __truediv__, __floordiv__, and their reversed versions
555
+ with timedelta64-dtype ndarray other.
556
+ """
557
+ # Let numpy handle it
558
+ result = op(self._ndarray, np.asarray(other))
559
+
560
+ if (is_integer_dtype(other.dtype) or is_float_dtype(other.dtype)) and op in [
561
+ operator.truediv,
562
+ operator.floordiv,
563
+ ]:
564
+ return type(self)._simple_new(result, dtype=result.dtype)
565
+
566
+ if op in [operator.floordiv, roperator.rfloordiv]:
567
+ mask = self.isna() | isna(other)
568
+ if mask.any():
569
+ result = result.astype(np.float64)
570
+ np.putmask(result, mask, np.nan)
571
+
572
+ return result
573
+
574
+ @unpack_zerodim_and_defer("__truediv__")
575
+ def __truediv__(self, other):
576
+ # timedelta / X is well-defined for timedelta-like or numeric X
577
+ op = operator.truediv
578
+ if is_scalar(other):
579
+ return self._scalar_divlike_op(other, op)
580
+
581
+ other = self._cast_divlike_op(other)
582
+ if (
583
+ lib.is_np_dtype(other.dtype, "m")
584
+ or is_integer_dtype(other.dtype)
585
+ or is_float_dtype(other.dtype)
586
+ ):
587
+ return self._vector_divlike_op(other, op)
588
+
589
+ if is_object_dtype(other.dtype):
590
+ other = np.asarray(other)
591
+ if self.ndim > 1:
592
+ res_cols = [left / right for left, right in zip(self, other)]
593
+ res_cols2 = [x.reshape(1, -1) for x in res_cols]
594
+ result = np.concatenate(res_cols2, axis=0)
595
+ else:
596
+ result = truediv_object_array(self._ndarray, other)
597
+
598
+ return result
599
+
600
+ else:
601
+ return NotImplemented
602
+
603
+ @unpack_zerodim_and_defer("__rtruediv__")
604
+ def __rtruediv__(self, other):
605
+ # X / timedelta is defined only for timedelta-like X
606
+ op = roperator.rtruediv
607
+ if is_scalar(other):
608
+ return self._scalar_divlike_op(other, op)
609
+
610
+ other = self._cast_divlike_op(other)
611
+ if lib.is_np_dtype(other.dtype, "m"):
612
+ return self._vector_divlike_op(other, op)
613
+
614
+ elif is_object_dtype(other.dtype):
615
+ # Note: unlike in __truediv__, we do not _need_ to do type
616
+ # inference on the result. It does not raise, a numeric array
617
+ # is returned. GH#23829
618
+ result_list = [other[n] / self[n] for n in range(len(self))]
619
+ return np.array(result_list)
620
+
621
+ else:
622
+ return NotImplemented
623
+
624
+ @unpack_zerodim_and_defer("__floordiv__")
625
+ def __floordiv__(self, other):
626
+ op = operator.floordiv
627
+ if is_scalar(other):
628
+ return self._scalar_divlike_op(other, op)
629
+
630
+ other = self._cast_divlike_op(other)
631
+ if (
632
+ lib.is_np_dtype(other.dtype, "m")
633
+ or is_integer_dtype(other.dtype)
634
+ or is_float_dtype(other.dtype)
635
+ ):
636
+ return self._vector_divlike_op(other, op)
637
+
638
+ elif is_object_dtype(other.dtype):
639
+ other = np.asarray(other)
640
+ if self.ndim > 1:
641
+ res_cols = [left // right for left, right in zip(self, other)]
642
+ res_cols2 = [x.reshape(1, -1) for x in res_cols]
643
+ result = np.concatenate(res_cols2, axis=0)
644
+ else:
645
+ result = floordiv_object_array(self._ndarray, other)
646
+
647
+ assert result.dtype == object
648
+ return result
649
+
650
+ else:
651
+ return NotImplemented
652
+
653
+ @unpack_zerodim_and_defer("__rfloordiv__")
654
+ def __rfloordiv__(self, other):
655
+ op = roperator.rfloordiv
656
+ if is_scalar(other):
657
+ return self._scalar_divlike_op(other, op)
658
+
659
+ other = self._cast_divlike_op(other)
660
+ if lib.is_np_dtype(other.dtype, "m"):
661
+ return self._vector_divlike_op(other, op)
662
+
663
+ elif is_object_dtype(other.dtype):
664
+ result_list = [other[n] // self[n] for n in range(len(self))]
665
+ result = np.array(result_list)
666
+ return result
667
+
668
+ else:
669
+ return NotImplemented
670
+
671
+ @unpack_zerodim_and_defer("__mod__")
672
+ def __mod__(self, other):
673
+ # Note: This is a naive implementation, can likely be optimized
674
+ if isinstance(other, self._recognized_scalars):
675
+ other = Timedelta(other)
676
+ return self - (self // other) * other
677
+
678
+ @unpack_zerodim_and_defer("__rmod__")
679
+ def __rmod__(self, other):
680
+ # Note: This is a naive implementation, can likely be optimized
681
+ if isinstance(other, self._recognized_scalars):
682
+ other = Timedelta(other)
683
+ return other - (other // self) * self
684
+
685
+ @unpack_zerodim_and_defer("__divmod__")
686
+ def __divmod__(self, other):
687
+ # Note: This is a naive implementation, can likely be optimized
688
+ if isinstance(other, self._recognized_scalars):
689
+ other = Timedelta(other)
690
+
691
+ res1 = self // other
692
+ res2 = self - res1 * other
693
+ return res1, res2
694
+
695
+ @unpack_zerodim_and_defer("__rdivmod__")
696
+ def __rdivmod__(self, other):
697
+ # Note: This is a naive implementation, can likely be optimized
698
+ if isinstance(other, self._recognized_scalars):
699
+ other = Timedelta(other)
700
+
701
+ res1 = other // self
702
+ res2 = other - res1 * self
703
+ return res1, res2
704
+
705
+ def __neg__(self) -> TimedeltaArray:
706
+ freq = None
707
+ if self.freq is not None:
708
+ freq = -self.freq
709
+ return type(self)._simple_new(-self._ndarray, dtype=self.dtype, freq=freq)
710
+
711
+ def __pos__(self) -> TimedeltaArray:
712
+ return type(self)._simple_new(
713
+ self._ndarray.copy(), dtype=self.dtype, freq=self.freq
714
+ )
715
+
716
+ def __abs__(self) -> TimedeltaArray:
717
+ # Note: freq is not preserved
718
+ return type(self)._simple_new(np.abs(self._ndarray), dtype=self.dtype)
719
+
720
+ # ----------------------------------------------------------------
721
+ # Conversion Methods - Vectorized analogues of Timedelta methods
722
+
723
+ def total_seconds(self) -> npt.NDArray[np.float64]:
724
+ """
725
+ Return total duration of each element expressed in seconds.
726
+
727
+ This method is available directly on TimedeltaArray, TimedeltaIndex
728
+ and on Series containing timedelta values under the ``.dt`` namespace.
729
+
730
+ Returns
731
+ -------
732
+ ndarray, Index or Series
733
+ When the calling object is a TimedeltaArray, the return type
734
+ is ndarray. When the calling object is a TimedeltaIndex,
735
+ the return type is an Index with a float64 dtype. When the calling object
736
+ is a Series, the return type is Series of type `float64` whose
737
+ index is the same as the original.
738
+
739
+ See Also
740
+ --------
741
+ datetime.timedelta.total_seconds : Standard library version
742
+ of this method.
743
+ TimedeltaIndex.components : Return a DataFrame with components of
744
+ each Timedelta.
745
+
746
+ Examples
747
+ --------
748
+ **Series**
749
+
750
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
751
+ >>> s
752
+ 0 0 days
753
+ 1 1 days
754
+ 2 2 days
755
+ 3 3 days
756
+ 4 4 days
757
+ dtype: timedelta64[ns]
758
+
759
+ >>> s.dt.total_seconds()
760
+ 0 0.0
761
+ 1 86400.0
762
+ 2 172800.0
763
+ 3 259200.0
764
+ 4 345600.0
765
+ dtype: float64
766
+
767
+ **TimedeltaIndex**
768
+
769
+ >>> idx = pd.to_timedelta(np.arange(5), unit='d')
770
+ >>> idx
771
+ TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
772
+ dtype='timedelta64[ns]', freq=None)
773
+
774
+ >>> idx.total_seconds()
775
+ Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64')
776
+ """
777
+ pps = periods_per_second(self._creso)
778
+ return self._maybe_mask_results(self.asi8 / pps, fill_value=None)
779
+
780
+ def to_pytimedelta(self) -> npt.NDArray[np.object_]:
781
+ """
782
+ Return an ndarray of datetime.timedelta objects.
783
+
784
+ Returns
785
+ -------
786
+ numpy.ndarray
787
+
788
+ Examples
789
+ --------
790
+ >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D')
791
+ >>> tdelta_idx
792
+ TimedeltaIndex(['1 days', '2 days', '3 days'],
793
+ dtype='timedelta64[ns]', freq=None)
794
+ >>> tdelta_idx.to_pytimedelta()
795
+ array([datetime.timedelta(days=1), datetime.timedelta(days=2),
796
+ datetime.timedelta(days=3)], dtype=object)
797
+ """
798
+ return ints_to_pytimedelta(self._ndarray)
799
+
800
+ days_docstring = textwrap.dedent(
801
+ """Number of days for each element.
802
+
803
+ Examples
804
+ --------
805
+ For Series:
806
+
807
+ >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='d'))
808
+ >>> ser
809
+ 0 1 days
810
+ 1 2 days
811
+ 2 3 days
812
+ dtype: timedelta64[ns]
813
+ >>> ser.dt.days
814
+ 0 1
815
+ 1 2
816
+ 2 3
817
+ dtype: int64
818
+
819
+ For TimedeltaIndex:
820
+
821
+ >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])
822
+ >>> tdelta_idx
823
+ TimedeltaIndex(['0 days', '10 days', '20 days'],
824
+ dtype='timedelta64[ns]', freq=None)
825
+ >>> tdelta_idx.days
826
+ Index([0, 10, 20], dtype='int64')"""
827
+ )
828
+ days = _field_accessor("days", "days", days_docstring)
829
+
830
+ seconds_docstring = textwrap.dedent(
831
+ """Number of seconds (>= 0 and less than 1 day) for each element.
832
+
833
+ Examples
834
+ --------
835
+ For Series:
836
+
837
+ >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='s'))
838
+ >>> ser
839
+ 0 0 days 00:00:01
840
+ 1 0 days 00:00:02
841
+ 2 0 days 00:00:03
842
+ dtype: timedelta64[ns]
843
+ >>> ser.dt.seconds
844
+ 0 1
845
+ 1 2
846
+ 2 3
847
+ dtype: int32
848
+
849
+ For TimedeltaIndex:
850
+
851
+ >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='s')
852
+ >>> tdelta_idx
853
+ TimedeltaIndex(['0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03'],
854
+ dtype='timedelta64[ns]', freq=None)
855
+ >>> tdelta_idx.seconds
856
+ Index([1, 2, 3], dtype='int32')"""
857
+ )
858
+ seconds = _field_accessor(
859
+ "seconds",
860
+ "seconds",
861
+ seconds_docstring,
862
+ )
863
+
864
+ microseconds_docstring = textwrap.dedent(
865
+ """Number of microseconds (>= 0 and less than 1 second) for each element.
866
+
867
+ Examples
868
+ --------
869
+ For Series:
870
+
871
+ >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='us'))
872
+ >>> ser
873
+ 0 0 days 00:00:00.000001
874
+ 1 0 days 00:00:00.000002
875
+ 2 0 days 00:00:00.000003
876
+ dtype: timedelta64[ns]
877
+ >>> ser.dt.microseconds
878
+ 0 1
879
+ 1 2
880
+ 2 3
881
+ dtype: int32
882
+
883
+ For TimedeltaIndex:
884
+
885
+ >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='us')
886
+ >>> tdelta_idx
887
+ TimedeltaIndex(['0 days 00:00:00.000001', '0 days 00:00:00.000002',
888
+ '0 days 00:00:00.000003'],
889
+ dtype='timedelta64[ns]', freq=None)
890
+ >>> tdelta_idx.microseconds
891
+ Index([1, 2, 3], dtype='int32')"""
892
+ )
893
+ microseconds = _field_accessor(
894
+ "microseconds",
895
+ "microseconds",
896
+ microseconds_docstring,
897
+ )
898
+
899
+ nanoseconds_docstring = textwrap.dedent(
900
+ """Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.
901
+
902
+ Examples
903
+ --------
904
+ For Series:
905
+
906
+ >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='ns'))
907
+ >>> ser
908
+ 0 0 days 00:00:00.000000001
909
+ 1 0 days 00:00:00.000000002
910
+ 2 0 days 00:00:00.000000003
911
+ dtype: timedelta64[ns]
912
+ >>> ser.dt.nanoseconds
913
+ 0 1
914
+ 1 2
915
+ 2 3
916
+ dtype: int32
917
+
918
+ For TimedeltaIndex:
919
+
920
+ >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='ns')
921
+ >>> tdelta_idx
922
+ TimedeltaIndex(['0 days 00:00:00.000000001', '0 days 00:00:00.000000002',
923
+ '0 days 00:00:00.000000003'],
924
+ dtype='timedelta64[ns]', freq=None)
925
+ >>> tdelta_idx.nanoseconds
926
+ Index([1, 2, 3], dtype='int32')"""
927
+ )
928
+ nanoseconds = _field_accessor(
929
+ "nanoseconds",
930
+ "nanoseconds",
931
+ nanoseconds_docstring,
932
+ )
933
+
934
+ @property
935
+ def components(self) -> DataFrame:
936
+ """
937
+ Return a DataFrame of the individual resolution components of the Timedeltas.
938
+
939
+ The components (days, hours, minutes, seconds, milliseconds, microseconds,
940
+ nanoseconds) are returned as columns in a DataFrame.
941
+
942
+ Returns
943
+ -------
944
+ DataFrame
945
+
946
+ Examples
947
+ --------
948
+ >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
949
+ >>> tdelta_idx
950
+ TimedeltaIndex(['1 days 00:03:00.000002042'],
951
+ dtype='timedelta64[ns]', freq=None)
952
+ >>> tdelta_idx.components
953
+ days hours minutes seconds milliseconds microseconds nanoseconds
954
+ 0 1 0 3 0 0 2 42
955
+ """
956
+ from pandas import DataFrame
957
+
958
+ columns = [
959
+ "days",
960
+ "hours",
961
+ "minutes",
962
+ "seconds",
963
+ "milliseconds",
964
+ "microseconds",
965
+ "nanoseconds",
966
+ ]
967
+ hasnans = self._hasna
968
+ if hasnans:
969
+
970
+ def f(x):
971
+ if isna(x):
972
+ return [np.nan] * len(columns)
973
+ return x.components
974
+
975
+ else:
976
+
977
+ def f(x):
978
+ return x.components
979
+
980
+ result = DataFrame([f(x) for x in self], columns=columns)
981
+ if not hasnans:
982
+ result = result.astype("int64")
983
+ return result
984
+
985
+
986
+ # ---------------------------------------------------------------------
987
+ # Constructor Helpers
988
+
989
+
990
+ def sequence_to_td64ns(
991
+ data,
992
+ copy: bool = False,
993
+ unit=None,
994
+ errors: DateTimeErrorChoices = "raise",
995
+ ) -> tuple[np.ndarray, Tick | None]:
996
+ """
997
+ Parameters
998
+ ----------
999
+ data : list-like
1000
+ copy : bool, default False
1001
+ unit : str, optional
1002
+ The timedelta unit to treat integers as multiples of. For numeric
1003
+ data this defaults to ``'ns'``.
1004
+ Must be un-specified if the data contains a str and ``errors=="raise"``.
1005
+ errors : {"raise", "coerce", "ignore"}, default "raise"
1006
+ How to handle elements that cannot be converted to timedelta64[ns].
1007
+ See ``pandas.to_timedelta`` for details.
1008
+
1009
+ Returns
1010
+ -------
1011
+ converted : numpy.ndarray
1012
+ The sequence converted to a numpy array with dtype ``timedelta64[ns]``.
1013
+ inferred_freq : Tick or None
1014
+ The inferred frequency of the sequence.
1015
+
1016
+ Raises
1017
+ ------
1018
+ ValueError : Data cannot be converted to timedelta64[ns].
1019
+
1020
+ Notes
1021
+ -----
1022
+ Unlike `pandas.to_timedelta`, setting ``errors=ignore`` will not cause
1023
+ errors to be ignored; they are caught and subsequently ignored at a
1024
+ higher level.
1025
+ """
1026
+ assert unit not in ["Y", "y", "M"] # caller is responsible for checking
1027
+
1028
+ inferred_freq = None
1029
+ if unit is not None:
1030
+ unit = parse_timedelta_unit(unit)
1031
+
1032
+ data, copy = dtl.ensure_arraylike_for_datetimelike(
1033
+ data, copy, cls_name="TimedeltaArray"
1034
+ )
1035
+
1036
+ if isinstance(data, TimedeltaArray):
1037
+ inferred_freq = data.freq
1038
+
1039
+ # Convert whatever we have into timedelta64[ns] dtype
1040
+ if data.dtype == object or is_string_dtype(data.dtype):
1041
+ # no need to make a copy, need to convert if string-dtyped
1042
+ data = _objects_to_td64ns(data, unit=unit, errors=errors)
1043
+ copy = False
1044
+
1045
+ elif is_integer_dtype(data.dtype):
1046
+ # treat as multiples of the given unit
1047
+ data, copy_made = _ints_to_td64ns(data, unit=unit)
1048
+ copy = copy and not copy_made
1049
+
1050
+ elif is_float_dtype(data.dtype):
1051
+ # cast the unit, multiply base/frac separately
1052
+ # to avoid precision issues from float -> int
1053
+ if isinstance(data.dtype, ExtensionDtype):
1054
+ mask = data._mask
1055
+ data = data._data
1056
+ else:
1057
+ mask = np.isnan(data)
1058
+
1059
+ data = cast_from_unit_vectorized(data, unit or "ns")
1060
+ data[mask] = iNaT
1061
+ data = data.view("m8[ns]")
1062
+ copy = False
1063
+
1064
+ elif lib.is_np_dtype(data.dtype, "m"):
1065
+ if not is_supported_dtype(data.dtype):
1066
+ # cast to closest supported unit, i.e. s or ns
1067
+ new_dtype = get_supported_dtype(data.dtype)
1068
+ data = astype_overflowsafe(data, dtype=new_dtype, copy=False)
1069
+ copy = False
1070
+
1071
+ else:
1072
+ # This includes datetime64-dtype, see GH#23539, GH#29794
1073
+ raise TypeError(f"dtype {data.dtype} cannot be converted to timedelta64[ns]")
1074
+
1075
+ if not copy:
1076
+ data = np.asarray(data)
1077
+ else:
1078
+ data = np.array(data, copy=copy)
1079
+
1080
+ assert data.dtype.kind == "m"
1081
+ assert data.dtype != "m8" # i.e. not unit-less
1082
+
1083
+ return data, inferred_freq
1084
+
1085
+
1086
+ def _ints_to_td64ns(data, unit: str = "ns"):
1087
+ """
1088
+ Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating
1089
+ the integers as multiples of the given timedelta unit.
1090
+
1091
+ Parameters
1092
+ ----------
1093
+ data : numpy.ndarray with integer-dtype
1094
+ unit : str, default "ns"
1095
+ The timedelta unit to treat integers as multiples of.
1096
+
1097
+ Returns
1098
+ -------
1099
+ numpy.ndarray : timedelta64[ns] array converted from data
1100
+ bool : whether a copy was made
1101
+ """
1102
+ copy_made = False
1103
+ unit = unit if unit is not None else "ns"
1104
+
1105
+ if data.dtype != np.int64:
1106
+ # converting to int64 makes a copy, so we can avoid
1107
+ # re-copying later
1108
+ data = data.astype(np.int64)
1109
+ copy_made = True
1110
+
1111
+ if unit != "ns":
1112
+ dtype_str = f"timedelta64[{unit}]"
1113
+ data = data.view(dtype_str)
1114
+
1115
+ data = astype_overflowsafe(data, dtype=TD64NS_DTYPE)
1116
+
1117
+ # the astype conversion makes a copy, so we can avoid re-copying later
1118
+ copy_made = True
1119
+
1120
+ else:
1121
+ data = data.view("timedelta64[ns]")
1122
+
1123
+ return data, copy_made
1124
+
1125
+
1126
+ def _objects_to_td64ns(data, unit=None, errors: DateTimeErrorChoices = "raise"):
1127
+ """
1128
+ Convert an object-dtyped or string-dtyped array into a
1129
+ timedelta64[ns]-dtyped array.
1130
+
1131
+ Parameters
1132
+ ----------
1133
+ data : ndarray or Index
1134
+ unit : str, default "ns"
1135
+ The timedelta unit to treat integers as multiples of.
1136
+ Must not be specified if the data contains a str.
1137
+ errors : {"raise", "coerce", "ignore"}, default "raise"
1138
+ How to handle elements that cannot be converted to timedelta64[ns].
1139
+ See ``pandas.to_timedelta`` for details.
1140
+
1141
+ Returns
1142
+ -------
1143
+ numpy.ndarray : timedelta64[ns] array converted from data
1144
+
1145
+ Raises
1146
+ ------
1147
+ ValueError : Data cannot be converted to timedelta64[ns].
1148
+
1149
+ Notes
1150
+ -----
1151
+ Unlike `pandas.to_timedelta`, setting `errors=ignore` will not cause
1152
+ errors to be ignored; they are caught and subsequently ignored at a
1153
+ higher level.
1154
+ """
1155
+ # coerce Index to np.ndarray, converting string-dtype if necessary
1156
+ values = np.asarray(data, dtype=np.object_)
1157
+
1158
+ result = array_to_timedelta64(values, unit=unit, errors=errors)
1159
+ return result.view("timedelta64[ns]")
1160
+
1161
+
1162
+ def _validate_td64_dtype(dtype) -> DtypeObj:
1163
+ dtype = pandas_dtype(dtype)
1164
+ if dtype == np.dtype("m8"):
1165
+ # no precision disallowed GH#24806
1166
+ msg = (
1167
+ "Passing in 'timedelta' dtype with no precision is not allowed. "
1168
+ "Please pass in 'timedelta64[ns]' instead."
1169
+ )
1170
+ raise ValueError(msg)
1171
+
1172
+ if not lib.is_np_dtype(dtype, "m"):
1173
+ raise ValueError(f"dtype '{dtype}' is invalid, should be np.timedelta64 dtype")
1174
+ elif not is_supported_dtype(dtype):
1175
+ raise ValueError("Supported timedelta64 resolutions are 's', 'ms', 'us', 'ns'")
1176
+
1177
+ return dtype
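As a brief, hedged illustration of the constructor helpers above (an editorial sketch, not part of the added file): ``sequence_to_td64ns`` is a private helper, so the import path below simply mirrors the file it is defined in.

    import numpy as np
    from pandas.core.arrays.timedeltas import sequence_to_td64ns

    # Integer input is treated as multiples of the requested unit ("s" here)
    # and converted, overflow-safely, to nanosecond resolution.
    data, inferred_freq = sequence_to_td64ns(np.array([1, 2, 3]), unit="s")
    assert data.dtype == "timedelta64[ns]"
    assert inferred_freq is None  # only TimedeltaArray input carries a freq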
venv/lib/python3.10/site-packages/pandas/core/base.py ADDED
@@ -0,0 +1,1391 @@
1
+ """
2
+ Base and utility classes for pandas objects.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import textwrap
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ Any,
11
+ Generic,
12
+ Literal,
13
+ cast,
14
+ final,
15
+ overload,
16
+ )
17
+ import warnings
18
+
19
+ import numpy as np
20
+
21
+ from pandas._config import using_copy_on_write
22
+
23
+ from pandas._libs import lib
24
+ from pandas._typing import (
25
+ AxisInt,
26
+ DtypeObj,
27
+ IndexLabel,
28
+ NDFrameT,
29
+ Self,
30
+ Shape,
31
+ npt,
32
+ )
33
+ from pandas.compat import PYPY
34
+ from pandas.compat.numpy import function as nv
35
+ from pandas.errors import AbstractMethodError
36
+ from pandas.util._decorators import (
37
+ cache_readonly,
38
+ doc,
39
+ )
40
+ from pandas.util._exceptions import find_stack_level
41
+
42
+ from pandas.core.dtypes.cast import can_hold_element
43
+ from pandas.core.dtypes.common import (
44
+ is_object_dtype,
45
+ is_scalar,
46
+ )
47
+ from pandas.core.dtypes.dtypes import ExtensionDtype
48
+ from pandas.core.dtypes.generic import (
49
+ ABCDataFrame,
50
+ ABCIndex,
51
+ ABCSeries,
52
+ )
53
+ from pandas.core.dtypes.missing import (
54
+ isna,
55
+ remove_na_arraylike,
56
+ )
57
+
58
+ from pandas.core import (
59
+ algorithms,
60
+ nanops,
61
+ ops,
62
+ )
63
+ from pandas.core.accessor import DirNamesMixin
64
+ from pandas.core.arraylike import OpsMixin
65
+ from pandas.core.arrays import ExtensionArray
66
+ from pandas.core.construction import (
67
+ ensure_wrapped_if_datetimelike,
68
+ extract_array,
69
+ )
70
+
71
+ if TYPE_CHECKING:
72
+ from collections.abc import (
73
+ Hashable,
74
+ Iterator,
75
+ )
76
+
77
+ from pandas._typing import (
78
+ DropKeep,
79
+ NumpySorter,
80
+ NumpyValueArrayLike,
81
+ ScalarLike_co,
82
+ )
83
+
84
+ from pandas import (
85
+ DataFrame,
86
+ Index,
87
+ Series,
88
+ )
89
+
90
+
91
+ _shared_docs: dict[str, str] = {}
92
+ _indexops_doc_kwargs = {
93
+ "klass": "IndexOpsMixin",
94
+ "inplace": "",
95
+ "unique": "IndexOpsMixin",
96
+ "duplicated": "IndexOpsMixin",
97
+ }
98
+
99
+
100
+ class PandasObject(DirNamesMixin):
101
+ """
102
+ Base class for various pandas objects.
103
+ """
104
+
105
+ # results from calls to methods decorated with cache_readonly get added to _cache
106
+ _cache: dict[str, Any]
107
+
108
+ @property
109
+ def _constructor(self):
110
+ """
111
+ Class constructor (for this class it's just `__class__`).
112
+ """
113
+ return type(self)
114
+
115
+ def __repr__(self) -> str:
116
+ """
117
+ Return a string representation for a particular object.
118
+ """
119
+ # Should be overwritten by base classes
120
+ return object.__repr__(self)
121
+
122
+ def _reset_cache(self, key: str | None = None) -> None:
123
+ """
124
+ Reset cached properties. If ``key`` is passed, only clears that key.
125
+ """
126
+ if not hasattr(self, "_cache"):
127
+ return
128
+ if key is None:
129
+ self._cache.clear()
130
+ else:
131
+ self._cache.pop(key, None)
132
+
133
+ def __sizeof__(self) -> int:
134
+ """
135
+ Generates the total memory usage for an object that returns
136
+ either a value or Series of values
137
+ """
138
+ memory_usage = getattr(self, "memory_usage", None)
139
+ if memory_usage:
140
+ mem = memory_usage(deep=True) # pylint: disable=not-callable
141
+ return int(mem if is_scalar(mem) else mem.sum())
142
+
143
+ # no memory_usage attribute, so fall back to object's 'sizeof'
144
+ return super().__sizeof__()
145
+
146
+
147
+ class NoNewAttributesMixin:
148
+ """
149
+ Mixin which prevents adding new attributes.
150
+
151
+ Prevents additional attributes via xxx.attribute = "something" after a
152
+ call to `self.__freeze()`. Mainly used to prevent the user from using
153
+ wrong attributes on an accessor (`Series.cat/.str/.dt`).
154
+
155
+ If you really want to add a new attribute at a later time, you need to use
156
+ `object.__setattr__(self, key, value)`.
157
+ """
158
+
159
+ def _freeze(self) -> None:
160
+ """
161
+ Prevents setting additional attributes.
162
+ """
163
+ object.__setattr__(self, "__frozen", True)
164
+
165
+ # prevent adding any attribute via s.xxx.new_attribute = ...
166
+ def __setattr__(self, key: str, value) -> None:
167
+ # _cache is used by a decorator
168
+ # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
169
+ # because
170
+ # 1.) getattr is false for attributes that raise errors
171
+ # 2.) cls.__dict__ doesn't traverse into base classes
172
+ if getattr(self, "__frozen", False) and not (
173
+ key == "_cache"
174
+ or key in type(self).__dict__
175
+ or getattr(self, key, None) is not None
176
+ ):
177
+ raise AttributeError(f"You cannot add any new attribute '{key}'")
178
+ object.__setattr__(self, key, value)
179
+
180
+
181
+ class SelectionMixin(Generic[NDFrameT]):
182
+ """
183
+ Mixin implementing the selection & aggregation interface on a group-like
184
+ object. Sub-classes need to define: obj, exclusions.
185
+ """
186
+
187
+ obj: NDFrameT
188
+ _selection: IndexLabel | None = None
189
+ exclusions: frozenset[Hashable]
190
+ _internal_names = ["_cache", "__setstate__"]
191
+ _internal_names_set = set(_internal_names)
192
+
193
+ @final
194
+ @property
195
+ def _selection_list(self):
196
+ if not isinstance(
197
+ self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
198
+ ):
199
+ return [self._selection]
200
+ return self._selection
201
+
202
+ @cache_readonly
203
+ def _selected_obj(self):
204
+ if self._selection is None or isinstance(self.obj, ABCSeries):
205
+ return self.obj
206
+ else:
207
+ return self.obj[self._selection]
208
+
209
+ @final
210
+ @cache_readonly
211
+ def ndim(self) -> int:
212
+ return self._selected_obj.ndim
213
+
214
+ @final
215
+ @cache_readonly
216
+ def _obj_with_exclusions(self):
217
+ if isinstance(self.obj, ABCSeries):
218
+ return self.obj
219
+
220
+ if self._selection is not None:
221
+ return self.obj._getitem_nocopy(self._selection_list)
222
+
223
+ if len(self.exclusions) > 0:
224
+ # equivalent to `self.obj.drop(self.exclusions, axis=1)`
225
+ # but this avoids consolidating and making a copy
226
+ # TODO: following GH#45287 can we now use .drop directly without
227
+ # making a copy?
228
+ return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True)
229
+ else:
230
+ return self.obj
231
+
232
+ def __getitem__(self, key):
233
+ if self._selection is not None:
234
+ raise IndexError(f"Column(s) {self._selection} already selected")
235
+
236
+ if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
237
+ if len(self.obj.columns.intersection(key)) != len(set(key)):
238
+ bad_keys = list(set(key).difference(self.obj.columns))
239
+ raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
240
+ return self._gotitem(list(key), ndim=2)
241
+
242
+ else:
243
+ if key not in self.obj:
244
+ raise KeyError(f"Column not found: {key}")
245
+ ndim = self.obj[key].ndim
246
+ return self._gotitem(key, ndim=ndim)
247
+
248
+ def _gotitem(self, key, ndim: int, subset=None):
249
+ """
250
+ sub-classes to define
251
+ return a sliced object
252
+
253
+ Parameters
254
+ ----------
255
+ key : str / list of selections
256
+ ndim : {1, 2}
257
+ requested ndim of result
258
+ subset : object, default None
259
+ subset to act on
260
+ """
261
+ raise AbstractMethodError(self)
262
+
263
+ @final
264
+ def _infer_selection(self, key, subset: Series | DataFrame):
265
+ """
266
+ Infer the `selection` to pass to our constructor in _gotitem.
267
+ """
268
+ # Shared by Rolling and Resample
269
+ selection = None
270
+ if subset.ndim == 2 and (
271
+ (lib.is_scalar(key) and key in subset) or lib.is_list_like(key)
272
+ ):
273
+ selection = key
274
+ elif subset.ndim == 1 and lib.is_scalar(key) and key == subset.name:
275
+ selection = key
276
+ return selection
277
+
278
+ def aggregate(self, func, *args, **kwargs):
279
+ raise AbstractMethodError(self)
280
+
281
+ agg = aggregate
282
+
283
+
284
+ class IndexOpsMixin(OpsMixin):
285
+ """
286
+ Common ops mixin to support a unified interface / docs for Series / Index
287
+ """
288
+
289
+ # ndarray compatibility
290
+ __array_priority__ = 1000
291
+ _hidden_attrs: frozenset[str] = frozenset(
292
+ ["tolist"] # tolist is not deprecated, just suppressed in the __dir__
293
+ )
294
+
295
+ @property
296
+ def dtype(self) -> DtypeObj:
297
+ # must be defined here as a property for mypy
298
+ raise AbstractMethodError(self)
299
+
300
+ @property
301
+ def _values(self) -> ExtensionArray | np.ndarray:
302
+ # must be defined here as a property for mypy
303
+ raise AbstractMethodError(self)
304
+
305
+ @final
306
+ def transpose(self, *args, **kwargs) -> Self:
307
+ """
308
+ Return the transpose, which is by definition self.
309
+
310
+ Returns
311
+ -------
312
+ %(klass)s
313
+ """
314
+ nv.validate_transpose(args, kwargs)
315
+ return self
316
+
317
+ T = property(
318
+ transpose,
319
+ doc="""
320
+ Return the transpose, which is by definition self.
321
+
322
+ Examples
323
+ --------
324
+ For Series:
325
+
326
+ >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
327
+ >>> s
328
+ 0 Ant
329
+ 1 Bear
330
+ 2 Cow
331
+ dtype: object
332
+ >>> s.T
333
+ 0 Ant
334
+ 1 Bear
335
+ 2 Cow
336
+ dtype: object
337
+
338
+ For Index:
339
+
340
+ >>> idx = pd.Index([1, 2, 3])
341
+ >>> idx.T
342
+ Index([1, 2, 3], dtype='int64')
343
+ """,
344
+ )
345
+
346
+ @property
347
+ def shape(self) -> Shape:
348
+ """
349
+ Return a tuple of the shape of the underlying data.
350
+
351
+ Examples
352
+ --------
353
+ >>> s = pd.Series([1, 2, 3])
354
+ >>> s.shape
355
+ (3,)
356
+ """
357
+ return self._values.shape
358
+
359
+ def __len__(self) -> int:
360
+ # We need this defined here for mypy
361
+ raise AbstractMethodError(self)
362
+
363
+ @property
364
+ def ndim(self) -> Literal[1]:
365
+ """
366
+ Number of dimensions of the underlying data, by definition 1.
367
+
368
+ Examples
369
+ --------
370
+ >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
371
+ >>> s
372
+ 0 Ant
373
+ 1 Bear
374
+ 2 Cow
375
+ dtype: object
376
+ >>> s.ndim
377
+ 1
378
+
379
+ For Index:
380
+
381
+ >>> idx = pd.Index([1, 2, 3])
382
+ >>> idx
383
+ Index([1, 2, 3], dtype='int64')
384
+ >>> idx.ndim
385
+ 1
386
+ """
387
+ return 1
388
+
389
+ @final
390
+ def item(self):
391
+ """
392
+ Return the first element of the underlying data as a Python scalar.
393
+
394
+ Returns
395
+ -------
396
+ scalar
397
+ The first element of Series or Index.
398
+
399
+ Raises
400
+ ------
401
+ ValueError
402
+ If the data is not length = 1.
403
+
404
+ Examples
405
+ --------
406
+ >>> s = pd.Series([1])
407
+ >>> s.item()
408
+ 1
409
+
410
+ For an index:
411
+
412
+ >>> s = pd.Series([1], index=['a'])
413
+ >>> s.index.item()
414
+ 'a'
415
+ """
416
+ if len(self) == 1:
417
+ return next(iter(self))
418
+ raise ValueError("can only convert an array of size 1 to a Python scalar")
419
+
420
+ @property
421
+ def nbytes(self) -> int:
422
+ """
423
+ Return the number of bytes in the underlying data.
424
+
425
+ Examples
426
+ --------
427
+ For Series:
428
+
429
+ >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
430
+ >>> s
431
+ 0 Ant
432
+ 1 Bear
433
+ 2 Cow
434
+ dtype: object
435
+ >>> s.nbytes
436
+ 24
437
+
438
+ For Index:
439
+
440
+ >>> idx = pd.Index([1, 2, 3])
441
+ >>> idx
442
+ Index([1, 2, 3], dtype='int64')
443
+ >>> idx.nbytes
444
+ 24
445
+ """
446
+ return self._values.nbytes
447
+
448
+ @property
449
+ def size(self) -> int:
450
+ """
451
+ Return the number of elements in the underlying data.
452
+
453
+ Examples
454
+ --------
455
+ For Series:
456
+
457
+ >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
458
+ >>> s
459
+ 0 Ant
460
+ 1 Bear
461
+ 2 Cow
462
+ dtype: object
463
+ >>> s.size
464
+ 3
465
+
466
+ For Index:
467
+
468
+ >>> idx = pd.Index([1, 2, 3])
469
+ >>> idx
470
+ Index([1, 2, 3], dtype='int64')
471
+ >>> idx.size
472
+ 3
473
+ """
474
+ return len(self._values)
475
+
476
+ @property
477
+ def array(self) -> ExtensionArray:
478
+ """
479
+ The ExtensionArray of the data backing this Series or Index.
480
+
481
+ Returns
482
+ -------
483
+ ExtensionArray
484
+ An ExtensionArray of the values stored within. For extension
485
+ types, this is the actual array. For NumPy native types, this
486
+ is a thin (no copy) wrapper around :class:`numpy.ndarray`.
487
+
488
+ ``.array`` differs from ``.values``, which may require converting
489
+ the data to a different form.
490
+
491
+ See Also
492
+ --------
493
+ Index.to_numpy : Similar method that always returns a NumPy array.
494
+ Series.to_numpy : Similar method that always returns a NumPy array.
495
+
496
+ Notes
497
+ -----
498
+ This table lays out the different array types for each extension
499
+ dtype within pandas.
500
+
501
+ ================== =============================
502
+ dtype array type
503
+ ================== =============================
504
+ category Categorical
505
+ period PeriodArray
506
+ interval IntervalArray
507
+ IntegerNA IntegerArray
508
+ string StringArray
509
+ boolean BooleanArray
510
+ datetime64[ns, tz] DatetimeArray
511
+ ================== =============================
512
+
513
+ For any 3rd-party extension types, the array type will be an
514
+ ExtensionArray.
515
+
516
+ For all remaining dtypes ``.array`` will be a
517
+ :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
518
+ stored within. If you absolutely need a NumPy array (possibly with
519
+ copying / coercing data), then use :meth:`Series.to_numpy` instead.
520
+
521
+ Examples
522
+ --------
523
+ For regular NumPy types like int, and float, a NumpyExtensionArray
524
+ is returned.
525
+
526
+ >>> pd.Series([1, 2, 3]).array
527
+ <NumpyExtensionArray>
528
+ [1, 2, 3]
529
+ Length: 3, dtype: int64
530
+
531
+ For extension types, like Categorical, the actual ExtensionArray
532
+ is returned
533
+
534
+ >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
535
+ >>> ser.array
536
+ ['a', 'b', 'a']
537
+ Categories (2, object): ['a', 'b']
538
+ """
539
+ raise AbstractMethodError(self)
540
+
541
+ @final
542
+ def to_numpy(
543
+ self,
544
+ dtype: npt.DTypeLike | None = None,
545
+ copy: bool = False,
546
+ na_value: object = lib.no_default,
547
+ **kwargs,
548
+ ) -> np.ndarray:
549
+ """
550
+ A NumPy ndarray representing the values in this Series or Index.
551
+
552
+ Parameters
553
+ ----------
554
+ dtype : str or numpy.dtype, optional
555
+ The dtype to pass to :meth:`numpy.asarray`.
556
+ copy : bool, default False
557
+ Whether to ensure that the returned value is not a view on
558
+ another array. Note that ``copy=False`` does not *ensure* that
559
+ ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
560
+ a copy is made, even if not strictly necessary.
561
+ na_value : Any, optional
562
+ The value to use for missing values. The default value depends
563
+ on `dtype` and the type of the array.
564
+ **kwargs
565
+ Additional keywords passed through to the ``to_numpy`` method
566
+ of the underlying array (for extension arrays).
567
+
568
+ Returns
569
+ -------
570
+ numpy.ndarray
571
+
572
+ See Also
573
+ --------
574
+ Series.array : Get the actual data stored within.
575
+ Index.array : Get the actual data stored within.
576
+ DataFrame.to_numpy : Similar method for DataFrame.
577
+
578
+ Notes
579
+ -----
580
+ The returned array will be the same up to equality (values equal
581
+ in `self` will be equal in the returned array; likewise for values
582
+ that are not equal). When `self` contains an ExtensionArray, the
583
+ dtype may be different. For example, for a category-dtype Series,
584
+ ``to_numpy()`` will return a NumPy array and the categorical dtype
585
+ will be lost.
586
+
587
+ For NumPy dtypes, this will be a reference to the actual data stored
588
+ in this Series or Index (assuming ``copy=False``). Modifying the result
589
+ in place will modify the data stored in the Series or Index (not that
590
+ we recommend doing that).
591
+
592
+ For extension types, ``to_numpy()`` *may* require copying data and
593
+ coercing the result to a NumPy type (possibly object), which may be
594
+ expensive. When you need a no-copy reference to the underlying data,
595
+ :attr:`Series.array` should be used instead.
596
+
597
+ This table lays out the different dtypes and default return types of
598
+ ``to_numpy()`` for various dtypes within pandas.
599
+
600
+ ================== ================================
601
+ dtype array type
602
+ ================== ================================
603
+ category[T] ndarray[T] (same dtype as input)
604
+ period ndarray[object] (Periods)
605
+ interval ndarray[object] (Intervals)
606
+ IntegerNA ndarray[object]
607
+ datetime64[ns] datetime64[ns]
608
+ datetime64[ns, tz] ndarray[object] (Timestamps)
609
+ ================== ================================
610
+
611
+ Examples
612
+ --------
613
+ >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
614
+ >>> ser.to_numpy()
615
+ array(['a', 'b', 'a'], dtype=object)
616
+
617
+ Specify the `dtype` to control how datetime-aware data is represented.
618
+ Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
619
+ objects, each with the correct ``tz``.
620
+
621
+ >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
622
+ >>> ser.to_numpy(dtype=object)
623
+ array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
624
+ Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
625
+ dtype=object)
626
+
627
+ Or ``dtype='datetime64[ns]'`` to return an ndarray of native
628
+ datetime64 values. The values are converted to UTC and the timezone
629
+ info is dropped.
630
+
631
+ >>> ser.to_numpy(dtype="datetime64[ns]")
632
+ ... # doctest: +ELLIPSIS
633
+ array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
634
+ dtype='datetime64[ns]')
635
+ """
636
+ if isinstance(self.dtype, ExtensionDtype):
637
+ return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs)
638
+ elif kwargs:
639
+ bad_keys = next(iter(kwargs.keys()))
640
+ raise TypeError(
641
+ f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
642
+ )
643
+
644
+ fillna = (
645
+ na_value is not lib.no_default
646
+ # no need to fillna with np.nan if we already have a float dtype
647
+ and not (na_value is np.nan and np.issubdtype(self.dtype, np.floating))
648
+ )
649
+
650
+ values = self._values
651
+ if fillna:
652
+ if not can_hold_element(values, na_value):
653
+ # if we can't hold the na_value asarray either makes a copy or we
654
+ # error before modifying values. The asarray later on thus won't make
655
+ # another copy
656
+ values = np.asarray(values, dtype=dtype)
657
+ else:
658
+ values = values.copy()
659
+
660
+ values[np.asanyarray(isna(self))] = na_value
661
+
662
+ result = np.asarray(values, dtype=dtype)
663
+
664
+ if (copy and not fillna) or (not copy and using_copy_on_write()):
665
+ if np.shares_memory(self._values[:2], result[:2]):
666
+ # Take slices to improve performance of check
667
+ if using_copy_on_write() and not copy:
668
+ result = result.view()
669
+ result.flags.writeable = False
670
+ else:
671
+ result = result.copy()
672
+
673
+ return result
674
+
675
+ @final
676
+ @property
677
+ def empty(self) -> bool:
678
+ return not self.size
679
+
680
+ @doc(op="max", oppose="min", value="largest")
681
+ def argmax(
682
+ self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
683
+ ) -> int:
684
+ """
685
+ Return int position of the {value} value in the Series.
686
+
687
+ If the {op}imum is achieved in multiple locations,
688
+ the first row position is returned.
689
+
690
+ Parameters
691
+ ----------
692
+ axis : {{None}}
693
+ Unused. Parameter needed for compatibility with DataFrame.
694
+ skipna : bool, default True
695
+ Exclude NA/null values when showing the result.
696
+ *args, **kwargs
697
+ Additional arguments and keywords for compatibility with NumPy.
698
+
699
+ Returns
700
+ -------
701
+ int
702
+ Row position of the {op}imum value.
703
+
704
+ See Also
705
+ --------
706
+ Series.arg{op} : Return position of the {op}imum value.
707
+ Series.arg{oppose} : Return position of the {oppose}imum value.
708
+ numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
709
+ Series.idxmax : Return index label of the maximum values.
710
+ Series.idxmin : Return index label of the minimum values.
711
+
712
+ Examples
713
+ --------
714
+ Consider dataset containing cereal calories
715
+
716
+ >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
717
+ ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
718
+ >>> s
719
+ Corn Flakes 100.0
720
+ Almond Delight 110.0
721
+ Cinnamon Toast Crunch 120.0
722
+ Cocoa Puff 110.0
723
+ dtype: float64
724
+
725
+ >>> s.argmax()
726
+ 2
727
+ >>> s.argmin()
728
+ 0
729
+
730
+ The maximum cereal calories is the third element and
731
+ the minimum cereal calories is the first element,
732
+ since series is zero-indexed.
733
+ """
734
+ delegate = self._values
735
+ nv.validate_minmax_axis(axis)
736
+ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
737
+
738
+ if isinstance(delegate, ExtensionArray):
739
+ if not skipna and delegate.isna().any():
740
+ warnings.warn(
741
+ f"The behavior of {type(self).__name__}.argmax/argmin "
742
+ "with skipna=False and NAs, or with all-NAs is deprecated. "
743
+ "In a future version this will raise ValueError.",
744
+ FutureWarning,
745
+ stacklevel=find_stack_level(),
746
+ )
747
+ return -1
748
+ else:
749
+ return delegate.argmax()
750
+ else:
751
+ result = nanops.nanargmax(delegate, skipna=skipna)
752
+ if result == -1:
753
+ warnings.warn(
754
+ f"The behavior of {type(self).__name__}.argmax/argmin "
755
+ "with skipna=False and NAs, or with all-NAs is deprecated. "
756
+ "In a future version this will raise ValueError.",
757
+ FutureWarning,
758
+ stacklevel=find_stack_level(),
759
+ )
760
+ # error: Incompatible return value type (got "Union[int, ndarray]", expected
761
+ # "int")
762
+ return result # type: ignore[return-value]
763
+
764
+ @doc(argmax, op="min", oppose="max", value="smallest")
765
+ def argmin(
766
+ self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
767
+ ) -> int:
768
+ delegate = self._values
769
+ nv.validate_minmax_axis(axis)
770
+ skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
771
+
772
+ if isinstance(delegate, ExtensionArray):
773
+ if not skipna and delegate.isna().any():
774
+ warnings.warn(
775
+ f"The behavior of {type(self).__name__}.argmax/argmin "
776
+ "with skipna=False and NAs, or with all-NAs is deprecated. "
777
+ "In a future version this will raise ValueError.",
778
+ FutureWarning,
779
+ stacklevel=find_stack_level(),
780
+ )
781
+ return -1
782
+ else:
783
+ return delegate.argmin()
784
+ else:
785
+ result = nanops.nanargmin(delegate, skipna=skipna)
786
+ if result == -1:
787
+ warnings.warn(
788
+ f"The behavior of {type(self).__name__}.argmax/argmin "
789
+ "with skipna=False and NAs, or with all-NAs is deprecated. "
790
+ "In a future version this will raise ValueError.",
791
+ FutureWarning,
792
+ stacklevel=find_stack_level(),
793
+ )
794
+ # error: Incompatible return value type (got "Union[int, ndarray]", expected
795
+ # "int")
796
+ return result # type: ignore[return-value]
797
+
798
+ def tolist(self):
799
+ """
800
+ Return a list of the values.
801
+
802
+ These are each a scalar type, which is a Python scalar
803
+ (for str, int, float) or a pandas scalar
804
+ (for Timestamp/Timedelta/Interval/Period)
805
+
806
+ Returns
807
+ -------
808
+ list
809
+
810
+ See Also
811
+ --------
812
+ numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
813
+ nested list of Python scalars.
814
+
815
+ Examples
816
+ --------
817
+ For Series
818
+
819
+ >>> s = pd.Series([1, 2, 3])
820
+ >>> s.to_list()
821
+ [1, 2, 3]
822
+
823
+ For Index:
824
+
825
+ >>> idx = pd.Index([1, 2, 3])
826
+ >>> idx
827
+ Index([1, 2, 3], dtype='int64')
828
+
829
+ >>> idx.to_list()
830
+ [1, 2, 3]
831
+ """
832
+ return self._values.tolist()
833
+
834
+ to_list = tolist
835
+
836
+ def __iter__(self) -> Iterator:
837
+ """
838
+ Return an iterator of the values.
839
+
840
+ These are each a scalar type, which is a Python scalar
841
+ (for str, int, float) or a pandas scalar
842
+ (for Timestamp/Timedelta/Interval/Period)
843
+
844
+ Returns
845
+ -------
846
+ iterator
847
+
848
+ Examples
849
+ --------
850
+ >>> s = pd.Series([1, 2, 3])
851
+ >>> for x in s:
852
+ ... print(x)
853
+ 1
854
+ 2
855
+ 3
856
+ """
857
+ # We are explicitly making element iterators.
858
+ if not isinstance(self._values, np.ndarray):
859
+ # Check type instead of dtype to catch DTA/TDA
860
+ return iter(self._values)
861
+ else:
862
+ return map(self._values.item, range(self._values.size))
863
+
864
+ @cache_readonly
865
+ def hasnans(self) -> bool:
866
+ """
867
+ Return True if there are any NaNs.
868
+
869
+ Enables various performance speedups.
870
+
871
+ Returns
872
+ -------
873
+ bool
874
+
875
+ Examples
876
+ --------
877
+ >>> s = pd.Series([1, 2, 3, None])
878
+ >>> s
879
+ 0 1.0
880
+ 1 2.0
881
+ 2 3.0
882
+ 3 NaN
883
+ dtype: float64
884
+ >>> s.hasnans
885
+ True
886
+ """
887
+ # error: Item "bool" of "Union[bool, ndarray[Any, dtype[bool_]], NDFrame]"
888
+ # has no attribute "any"
889
+ return bool(isna(self).any()) # type: ignore[union-attr]
890
+
891
+ @final
892
+ def _map_values(self, mapper, na_action=None, convert: bool = True):
893
+ """
894
+ An internal function that maps values using the input
895
+ correspondence (which can be a dict, Series, or function).
896
+
897
+ Parameters
898
+ ----------
899
+ mapper : function, dict, or Series
900
+ The input correspondence object
901
+ na_action : {None, 'ignore'}
902
+ If 'ignore', propagate NA values, without passing them to the
903
+ mapping function
904
+ convert : bool, default True
905
+ Try to find better dtype for elementwise function results. If
906
+ False, leave as dtype=object. Note that the dtype is always
907
+ preserved for some extension array dtypes, such as Categorical.
908
+
909
+ Returns
910
+ -------
911
+ Union[Index, MultiIndex], inferred
912
+ The output of the mapping function applied to the index.
913
+ If the function returns a tuple with more than one element
914
+ a MultiIndex will be returned.
915
+ """
916
+ arr = self._values
917
+
918
+ if isinstance(arr, ExtensionArray):
919
+ return arr.map(mapper, na_action=na_action)
920
+
921
+ return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert)
922
+
923
+ @final
924
+ def value_counts(
925
+ self,
926
+ normalize: bool = False,
927
+ sort: bool = True,
928
+ ascending: bool = False,
929
+ bins=None,
930
+ dropna: bool = True,
931
+ ) -> Series:
932
+ """
933
+ Return a Series containing counts of unique values.
934
+
935
+ The resulting object will be in descending order so that the
936
+ first element is the most frequently-occurring element.
937
+ Excludes NA values by default.
938
+
939
+ Parameters
940
+ ----------
941
+ normalize : bool, default False
942
+ If True then the object returned will contain the relative
943
+ frequencies of the unique values.
944
+ sort : bool, default True
945
+ Sort by frequencies when True. Preserve the order of the data when False.
946
+ ascending : bool, default False
947
+ Sort in ascending order.
948
+ bins : int, optional
949
+ Rather than count values, group them into half-open bins,
950
+ a convenience for ``pd.cut``, only works with numeric data.
951
+ dropna : bool, default True
952
+ Don't include counts of NaN.
953
+
954
+ Returns
955
+ -------
956
+ Series
957
+
958
+ See Also
959
+ --------
960
+ Series.count: Number of non-NA elements in a Series.
961
+ DataFrame.count: Number of non-NA elements in a DataFrame.
962
+ DataFrame.value_counts: Equivalent method on DataFrames.
963
+
964
+ Examples
965
+ --------
966
+ >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
967
+ >>> index.value_counts()
968
+ 3.0 2
969
+ 1.0 1
970
+ 2.0 1
971
+ 4.0 1
972
+ Name: count, dtype: int64
973
+
974
+ With `normalize` set to `True`, returns the relative frequency by
975
+ dividing all values by the sum of values.
976
+
977
+ >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
978
+ >>> s.value_counts(normalize=True)
979
+ 3.0 0.4
980
+ 1.0 0.2
981
+ 2.0 0.2
982
+ 4.0 0.2
983
+ Name: proportion, dtype: float64
984
+
985
+ **bins**
986
+
987
+ Bins can be useful for going from a continuous variable to a
988
+ occurrences of values, divide the index into the specified
989
+ apparitions of values, divide the index in the specified
990
+ number of half-open bins.
991
+
992
+ >>> s.value_counts(bins=3)
993
+ (0.996, 2.0] 2
994
+ (2.0, 3.0] 2
995
+ (3.0, 4.0] 1
996
+ Name: count, dtype: int64
997
+
998
+ **dropna**
999
+
1000
+ With `dropna` set to `False` we can also see NaN index values.
1001
+
1002
+ >>> s.value_counts(dropna=False)
1003
+ 3.0 2
1004
+ 1.0 1
1005
+ 2.0 1
1006
+ 4.0 1
1007
+ NaN 1
1008
+ Name: count, dtype: int64
1009
+ """
1010
+ return algorithms.value_counts_internal(
1011
+ self,
1012
+ sort=sort,
1013
+ ascending=ascending,
1014
+ normalize=normalize,
1015
+ bins=bins,
1016
+ dropna=dropna,
1017
+ )
1018
+
1019
+ def unique(self):
1020
+ values = self._values
1021
+ if not isinstance(values, np.ndarray):
1022
+ # i.e. ExtensionArray
1023
+ result = values.unique()
1024
+ else:
1025
+ result = algorithms.unique1d(values)
1026
+ return result
1027
+
1028
+ @final
1029
+ def nunique(self, dropna: bool = True) -> int:
1030
+ """
1031
+ Return number of unique elements in the object.
1032
+
1033
+ Excludes NA values by default.
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ dropna : bool, default True
1038
+ Don't include NaN in the count.
1039
+
1040
+ Returns
1041
+ -------
1042
+ int
1043
+
1044
+ See Also
1045
+ --------
1046
+ DataFrame.nunique: Method nunique for DataFrame.
1047
+ Series.count: Count non-NA/null observations in the Series.
1048
+
1049
+ Examples
1050
+ --------
1051
+ >>> s = pd.Series([1, 3, 5, 7, 7])
1052
+ >>> s
1053
+ 0 1
1054
+ 1 3
1055
+ 2 5
1056
+ 3 7
1057
+ 4 7
1058
+ dtype: int64
1059
+
1060
+ >>> s.nunique()
1061
+ 4
1062
+ """
1063
+ uniqs = self.unique()
1064
+ if dropna:
1065
+ uniqs = remove_na_arraylike(uniqs)
1066
+ return len(uniqs)
1067
+
1068
+ @property
1069
+ def is_unique(self) -> bool:
1070
+ """
1071
+ Return boolean if values in the object are unique.
1072
+
1073
+ Returns
1074
+ -------
1075
+ bool
1076
+
1077
+ Examples
1078
+ --------
1079
+ >>> s = pd.Series([1, 2, 3])
1080
+ >>> s.is_unique
1081
+ True
1082
+
1083
+ >>> s = pd.Series([1, 2, 3, 1])
1084
+ >>> s.is_unique
1085
+ False
1086
+ """
1087
+ return self.nunique(dropna=False) == len(self)
1088
+
1089
+ @property
1090
+ def is_monotonic_increasing(self) -> bool:
1091
+ """
1092
+ Return boolean if values in the object are monotonically increasing.
1093
+
1094
+ Returns
1095
+ -------
1096
+ bool
1097
+
1098
+ Examples
1099
+ --------
1100
+ >>> s = pd.Series([1, 2, 2])
1101
+ >>> s.is_monotonic_increasing
1102
+ True
1103
+
1104
+ >>> s = pd.Series([3, 2, 1])
1105
+ >>> s.is_monotonic_increasing
1106
+ False
1107
+ """
1108
+ from pandas import Index
1109
+
1110
+ return Index(self).is_monotonic_increasing
1111
+
1112
+ @property
1113
+ def is_monotonic_decreasing(self) -> bool:
1114
+ """
1115
+ Return boolean if values in the object are monotonically decreasing.
1116
+
1117
+ Returns
1118
+ -------
1119
+ bool
1120
+
1121
+ Examples
1122
+ --------
1123
+ >>> s = pd.Series([3, 2, 2, 1])
1124
+ >>> s.is_monotonic_decreasing
1125
+ True
1126
+
1127
+ >>> s = pd.Series([1, 2, 3])
1128
+ >>> s.is_monotonic_decreasing
1129
+ False
1130
+ """
1131
+ from pandas import Index
1132
+
1133
+ return Index(self).is_monotonic_decreasing
1134
+
1135
+ @final
1136
+ def _memory_usage(self, deep: bool = False) -> int:
1137
+ """
1138
+ Memory usage of the values.
1139
+
1140
+ Parameters
1141
+ ----------
1142
+ deep : bool, default False
1143
+ Introspect the data deeply, interrogate
1144
+ `object` dtypes for system-level memory consumption.
1145
+
1146
+ Returns
1147
+ -------
1148
+ bytes used
1149
+
1150
+ See Also
1151
+ --------
1152
+ numpy.ndarray.nbytes : Total bytes consumed by the elements of the
1153
+ array.
1154
+
1155
+ Notes
1156
+ -----
1157
+ Memory usage does not include memory consumed by elements that
1158
+ are not components of the array if deep=False or if used on PyPy
1159
+
1160
+ Examples
1161
+ --------
1162
+ >>> idx = pd.Index([1, 2, 3])
1163
+ >>> idx.memory_usage()
1164
+ 24
1165
+ """
1166
+ if hasattr(self.array, "memory_usage"):
1167
+ return self.array.memory_usage( # pyright: ignore[reportGeneralTypeIssues]
1168
+ deep=deep,
1169
+ )
1170
+
1171
+ v = self.array.nbytes
1172
+ if deep and is_object_dtype(self.dtype) and not PYPY:
1173
+ values = cast(np.ndarray, self._values)
1174
+ v += lib.memory_usage_of_objects(values)
1175
+ return v
1176
+
1177
+ @doc(
1178
+ algorithms.factorize,
1179
+ values="",
1180
+ order="",
1181
+ size_hint="",
1182
+ sort=textwrap.dedent(
1183
+ """\
1184
+ sort : bool, default False
1185
+ Sort `uniques` and shuffle `codes` to maintain the
1186
+ relationship.
1187
+ """
1188
+ ),
1189
+ )
1190
+ def factorize(
1191
+ self,
1192
+ sort: bool = False,
1193
+ use_na_sentinel: bool = True,
1194
+ ) -> tuple[npt.NDArray[np.intp], Index]:
1195
+ codes, uniques = algorithms.factorize(
1196
+ self._values, sort=sort, use_na_sentinel=use_na_sentinel
1197
+ )
1198
+ if uniques.dtype == np.float16:
1199
+ uniques = uniques.astype(np.float32)
1200
+
1201
+ if isinstance(self, ABCIndex):
1202
+ # preserve e.g. MultiIndex
1203
+ uniques = self._constructor(uniques)
1204
+ else:
1205
+ from pandas import Index
1206
+
1207
+ uniques = Index(uniques)
1208
+ return codes, uniques
1209
+
1210
+ _shared_docs[
1211
+ "searchsorted"
1212
+ ] = """
1213
+ Find indices where elements should be inserted to maintain order.
1214
+
1215
+ Find the indices into a sorted {klass} `self` such that, if the
1216
+ corresponding elements in `value` were inserted before the indices,
1217
+ the order of `self` would be preserved.
1218
+
1219
+ .. note::
1220
+
1221
+ The {klass} *must* be monotonically sorted, otherwise
1222
+ wrong locations will likely be returned. Pandas does *not*
1223
+ check this for you.
1224
+
1225
+ Parameters
1226
+ ----------
1227
+ value : array-like or scalar
1228
+ Values to insert into `self`.
1229
+ side : {{'left', 'right'}}, optional
1230
+ If 'left', the index of the first suitable location found is given.
1231
+ If 'right', return the last such index. If there is no suitable
1232
+ index, return either 0 or N (where N is the length of `self`).
1233
+ sorter : 1-D array-like, optional
1234
+ Optional array of integer indices that sort `self` into ascending
1235
+ order. They are typically the result of ``np.argsort``.
1236
+
1237
+ Returns
1238
+ -------
1239
+ int or array of int
1240
+ A scalar or array of insertion points with the
1241
+ same shape as `value`.
1242
+
1243
+ See Also
1244
+ --------
1245
+ sort_values : Sort by the values along either axis.
1246
+ numpy.searchsorted : Similar method from NumPy.
1247
+
1248
+ Notes
1249
+ -----
1250
+ Binary search is used to find the required insertion points.
1251
+
1252
+ Examples
1253
+ --------
1254
+ >>> ser = pd.Series([1, 2, 3])
1255
+ >>> ser
1256
+ 0 1
1257
+ 1 2
1258
+ 2 3
1259
+ dtype: int64
1260
+
1261
+ >>> ser.searchsorted(4)
1262
+ 3
1263
+
1264
+ >>> ser.searchsorted([0, 4])
1265
+ array([0, 3])
1266
+
1267
+ >>> ser.searchsorted([1, 3], side='left')
1268
+ array([0, 2])
1269
+
1270
+ >>> ser.searchsorted([1, 3], side='right')
1271
+ array([1, 3])
1272
+
1273
+ >>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
1274
+ >>> ser
1275
+ 0 2000-03-11
1276
+ 1 2000-03-12
1277
+ 2 2000-03-13
1278
+ dtype: datetime64[ns]
1279
+
1280
+ >>> ser.searchsorted('3/14/2000')
1281
+ 3
1282
+
1283
+ >>> ser = pd.Categorical(
1284
+ ... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
1285
+ ... )
1286
+ >>> ser
1287
+ ['apple', 'bread', 'bread', 'cheese', 'milk']
1288
+ Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']
1289
+
1290
+ >>> ser.searchsorted('bread')
1291
+ 1
1292
+
1293
+ >>> ser.searchsorted(['bread'], side='right')
1294
+ array([3])
1295
+
1296
+ If the values are not monotonically sorted, wrong locations
1297
+ may be returned:
1298
+
1299
+ >>> ser = pd.Series([2, 1, 3])
1300
+ >>> ser
1301
+ 0 2
1302
+ 1 1
1303
+ 2 3
1304
+ dtype: int64
1305
+
1306
+ >>> ser.searchsorted(1) # doctest: +SKIP
1307
+ 0 # wrong result, correct would be 1
1308
+ """
1309
+
1310
+ # This overload is needed so that the call to searchsorted in
1311
+ # pandas.core.resample.TimeGrouper._get_period_bins picks the correct result
1312
+
1313
+ # error: Overloaded function signatures 1 and 2 overlap with incompatible
1314
+ # return types
1315
+ @overload
1316
+ def searchsorted( # type: ignore[overload-overlap]
1317
+ self,
1318
+ value: ScalarLike_co,
1319
+ side: Literal["left", "right"] = ...,
1320
+ sorter: NumpySorter = ...,
1321
+ ) -> np.intp:
1322
+ ...
1323
+
1324
+ @overload
1325
+ def searchsorted(
1326
+ self,
1327
+ value: npt.ArrayLike | ExtensionArray,
1328
+ side: Literal["left", "right"] = ...,
1329
+ sorter: NumpySorter = ...,
1330
+ ) -> npt.NDArray[np.intp]:
1331
+ ...
1332
+
1333
+ @doc(_shared_docs["searchsorted"], klass="Index")
1334
+ def searchsorted(
1335
+ self,
1336
+ value: NumpyValueArrayLike | ExtensionArray,
1337
+ side: Literal["left", "right"] = "left",
1338
+ sorter: NumpySorter | None = None,
1339
+ ) -> npt.NDArray[np.intp] | np.intp:
1340
+ if isinstance(value, ABCDataFrame):
1341
+ msg = (
1342
+ "Value must be 1-D array-like or scalar, "
1343
+ f"{type(value).__name__} is not supported"
1344
+ )
1345
+ raise ValueError(msg)
1346
+
1347
+ values = self._values
1348
+ if not isinstance(values, np.ndarray):
1349
+ # Going through EA.searchsorted directly improves performance GH#38083
1350
+ return values.searchsorted(value, side=side, sorter=sorter)
1351
+
1352
+ return algorithms.searchsorted(
1353
+ values,
1354
+ value,
1355
+ side=side,
1356
+ sorter=sorter,
1357
+ )
1358
+
1359
+ def drop_duplicates(self, *, keep: DropKeep = "first"):
1360
+ duplicated = self._duplicated(keep=keep)
1361
+ # error: Value of type "IndexOpsMixin" is not indexable
1362
+ return self[~duplicated] # type: ignore[index]
1363
+
1364
+ @final
1365
+ def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
1366
+ arr = self._values
1367
+ if isinstance(arr, ExtensionArray):
1368
+ return arr.duplicated(keep=keep)
1369
+ return algorithms.duplicated(arr, keep=keep)
1370
+
1371
+ def _arith_method(self, other, op):
1372
+ res_name = ops.get_op_result_name(self, other)
1373
+
1374
+ lvalues = self._values
1375
+ rvalues = extract_array(other, extract_numpy=True, extract_range=True)
1376
+ rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)
1377
+ rvalues = ensure_wrapped_if_datetimelike(rvalues)
1378
+ if isinstance(rvalues, range):
1379
+ rvalues = np.arange(rvalues.start, rvalues.stop, rvalues.step)
1380
+
1381
+ with np.errstate(all="ignore"):
1382
+ result = ops.arithmetic_op(lvalues, rvalues, op)
1383
+
1384
+ return self._construct_result(result, name=res_name)
1385
+
1386
+ def _construct_result(self, result, name):
1387
+ """
1388
+ Construct an appropriately-wrapped result from the ArrayLike result
1389
+ of an arithmetic-like operation.
1390
+ """
1391
+ raise AbstractMethodError(self)
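A minimal usage sketch for the ``to_numpy`` path documented above (editorial illustration using only public API; the commented output is the expected result under current pandas behavior):

    import numpy as np
    import pandas as pd

    s = pd.Series([1, 2, None], dtype="Int64")           # masked extension dtype
    out = s.to_numpy(dtype="float64", na_value=np.nan)   # fill NA before converting
    print(out)        # [ 1.  2. nan]
    print(out.dtype)  # float64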
venv/lib/python3.10/site-packages/pandas/core/common.py ADDED
@@ -0,0 +1,657 @@
1
+ """
2
+ Misc tools for implementing data structures
3
+
4
+ Note: pandas.core.common is *not* part of the public API.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ import builtins
9
+ from collections import (
10
+ abc,
11
+ defaultdict,
12
+ )
13
+ from collections.abc import (
14
+ Collection,
15
+ Generator,
16
+ Hashable,
17
+ Iterable,
18
+ Sequence,
19
+ )
20
+ import contextlib
21
+ from functools import partial
22
+ import inspect
23
+ from typing import (
24
+ TYPE_CHECKING,
25
+ Any,
26
+ Callable,
27
+ cast,
28
+ overload,
29
+ )
30
+ import warnings
31
+
32
+ import numpy as np
33
+
34
+ from pandas._libs import lib
35
+ from pandas.compat.numpy import np_version_gte1p24
36
+
37
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
38
+ from pandas.core.dtypes.common import (
39
+ is_bool_dtype,
40
+ is_integer,
41
+ )
42
+ from pandas.core.dtypes.generic import (
43
+ ABCExtensionArray,
44
+ ABCIndex,
45
+ ABCMultiIndex,
46
+ ABCSeries,
47
+ )
48
+ from pandas.core.dtypes.inference import iterable_not_string
49
+
50
+ if TYPE_CHECKING:
51
+ from pandas._typing import (
52
+ AnyArrayLike,
53
+ ArrayLike,
54
+ NpDtype,
55
+ RandomState,
56
+ T,
57
+ )
58
+
59
+ from pandas import Index
60
+
61
+
62
+ def flatten(line):
63
+ """
64
+ Flatten an arbitrarily nested sequence.
65
+
66
+ Parameters
67
+ ----------
68
+ line : sequence
69
+ The non string sequence to flatten
70
+
71
+ Notes
72
+ -----
73
+ This doesn't consider string sequences.
74
+
75
+ Returns
76
+ -------
77
+ flattened : generator
78
+ """
79
+ for element in line:
80
+ if iterable_not_string(element):
81
+ yield from flatten(element)
82
+ else:
83
+ yield element
84
+
85
+
86
+ def consensus_name_attr(objs):
87
+ name = objs[0].name
88
+ for obj in objs[1:]:
89
+ try:
90
+ if obj.name != name:
91
+ name = None
92
+ except ValueError:
93
+ name = None
94
+ return name
95
+
96
+
97
+ def is_bool_indexer(key: Any) -> bool:
98
+ """
99
+ Check whether `key` is a valid boolean indexer.
100
+
101
+ Parameters
102
+ ----------
103
+ key : Any
104
+ Only list-likes may be considered boolean indexers.
105
+ All other types are not considered a boolean indexer.
106
+ For array-like input, boolean ndarrays or ExtensionArrays
107
+ with ``_is_boolean`` set are considered boolean indexers.
108
+
109
+ Returns
110
+ -------
111
+ bool
112
+ Whether `key` is a valid boolean indexer.
113
+
114
+ Raises
115
+ ------
116
+ ValueError
117
+ When the array is an object-dtype ndarray or ExtensionArray
118
+ and contains missing values.
119
+
120
+ See Also
121
+ --------
122
+ check_array_indexer : Check that `key` is a valid array to index,
123
+ and convert to an ndarray.
124
+ """
125
+ if isinstance(
126
+ key, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)
127
+ ) and not isinstance(key, ABCMultiIndex):
128
+ if key.dtype == np.object_:
129
+ key_array = np.asarray(key)
130
+
131
+ if not lib.is_bool_array(key_array):
132
+ na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
133
+ if lib.is_bool_array(key_array, skipna=True):
134
+ # Don't raise on e.g. ["A", "B", np.nan], see
135
+ # test_loc_getitem_list_of_labels_categoricalindex_with_na
136
+ raise ValueError(na_msg)
137
+ return False
138
+ return True
139
+ elif is_bool_dtype(key.dtype):
140
+ return True
141
+ elif isinstance(key, list):
142
+ # check if np.array(key).dtype would be bool
143
+ if len(key) > 0:
144
+ if type(key) is not list: # noqa: E721
145
+ # GH#42461 cython will raise TypeError if we pass a subclass
146
+ key = list(key)
147
+ return lib.is_bool_list(key)
148
+
149
+ return False
150
+
151
+
152
+ def cast_scalar_indexer(val):
153
+ """
154
+ Disallow indexing with a float key, even if that key is a round number.
155
+
156
+ Parameters
157
+ ----------
158
+ val : scalar
159
+
160
+ Returns
161
+ -------
162
+ outval : scalar
163
+ """
164
+ # assumes lib.is_scalar(val)
165
+ if lib.is_float(val) and val.is_integer():
166
+ raise IndexError(
167
+ # GH#34193
168
+ "Indexing with a float is no longer supported. Manually convert "
169
+ "to an integer key instead."
170
+ )
171
+ return val
172
+
173
+
174
+ def not_none(*args):
175
+ """
176
+ Returns a generator consisting of the arguments that are not None.
177
+ """
178
+ return (arg for arg in args if arg is not None)
179
+
180
+
181
+ def any_none(*args) -> bool:
182
+ """
183
+ Returns a boolean indicating if any argument is None.
184
+ """
185
+ return any(arg is None for arg in args)
186
+
187
+
188
+ def all_none(*args) -> bool:
189
+ """
190
+ Returns a boolean indicating if all arguments are None.
191
+ """
192
+ return all(arg is None for arg in args)
193
+
194
+
195
+ def any_not_none(*args) -> bool:
196
+ """
197
+ Returns a boolean indicating if any argument is not None.
198
+ """
199
+ return any(arg is not None for arg in args)
200
+
201
+
202
+ def all_not_none(*args) -> bool:
203
+ """
204
+ Returns a boolean indicating if all arguments are not None.
205
+ """
206
+ return all(arg is not None for arg in args)
207
+
208
+
209
+ def count_not_none(*args) -> int:
210
+ """
211
+ Returns the count of arguments that are not None.
212
+ """
213
+ return sum(x is not None for x in args)
214
+
215
+
216
+ @overload
217
+ def asarray_tuplesafe(
218
+ values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ...
219
+ ) -> np.ndarray:
220
+ # ExtensionArray can only be returned when values is an Index, all other iterables
221
+ # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type
222
+ # signature, so instead we special-case some common types.
223
+ ...
224
+
225
+
226
+ @overload
227
+ def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
228
+ ...
229
+
230
+
231
+ def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
232
+ if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
233
+ values = list(values)
234
+ elif isinstance(values, ABCIndex):
235
+ return values._values
236
+ elif isinstance(values, ABCSeries):
237
+ return values._values
238
+
239
+ if isinstance(values, list) and dtype in [np.object_, object]:
240
+ return construct_1d_object_array_from_listlike(values)
241
+
242
+ try:
243
+ with warnings.catch_warnings():
244
+ # Can remove warning filter once NumPy 1.24 is min version
245
+ if not np_version_gte1p24:
246
+ warnings.simplefilter("ignore", np.VisibleDeprecationWarning)
247
+ result = np.asarray(values, dtype=dtype)
248
+ except ValueError:
249
+ # Using try/except since it's more performant than checking is_list_like
250
+ # over each element
251
+ # error: Argument 1 to "construct_1d_object_array_from_listlike"
252
+ # has incompatible type "Iterable[Any]"; expected "Sized"
253
+ return construct_1d_object_array_from_listlike(values) # type: ignore[arg-type]
254
+
255
+ if issubclass(result.dtype.type, str):
256
+ result = np.asarray(values, dtype=object)
257
+
258
+ if result.ndim == 2:
259
+ # Avoid building an array of arrays:
260
+ values = [tuple(x) for x in values]
261
+ result = construct_1d_object_array_from_listlike(values)
262
+
263
+ return result
264
+
265
+
266
+ def index_labels_to_array(
267
+ labels: np.ndarray | Iterable, dtype: NpDtype | None = None
268
+ ) -> np.ndarray:
269
+ """
270
+ Transform label or iterable of labels to array, for use in Index.
271
+
272
+ Parameters
273
+ ----------
274
+ dtype : dtype
275
+ If specified, use as dtype of the resulting array, otherwise infer.
276
+
277
+ Returns
278
+ -------
279
+ array
280
+ """
281
+ if isinstance(labels, (str, tuple)):
282
+ labels = [labels]
283
+
284
+ if not isinstance(labels, (list, np.ndarray)):
285
+ try:
286
+ labels = list(labels)
287
+ except TypeError: # non-iterable
288
+ labels = [labels]
289
+
290
+ labels = asarray_tuplesafe(labels, dtype=dtype)
291
+
292
+ return labels
293
+
294
+
295
+ def maybe_make_list(obj):
296
+ if obj is not None and not isinstance(obj, (tuple, list)):
297
+ return [obj]
298
+ return obj
299
+
300
+
301
+ def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T:
302
+ """
303
+ If obj is Iterable but not list-like, consume into list.
304
+ """
305
+ if isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized):
306
+ return list(obj)
307
+ obj = cast(Collection, obj)
308
+ return obj
309
+
310
+
311
+ def is_null_slice(obj) -> bool:
312
+ """
313
+ We have a null slice.
314
+ """
315
+ return (
316
+ isinstance(obj, slice)
317
+ and obj.start is None
318
+ and obj.stop is None
319
+ and obj.step is None
320
+ )
321
+
322
+
323
+ def is_empty_slice(obj) -> bool:
324
+ """
325
+ We have an empty slice, e.g. no values are selected.
326
+ """
327
+ return (
328
+ isinstance(obj, slice)
329
+ and obj.start is not None
330
+ and obj.stop is not None
331
+ and obj.start == obj.stop
332
+ )
333
+
334
+
335
+ def is_true_slices(line) -> list[bool]:
336
+ """
337
+ Find non-trivial slices in "line": return a list of booleans with same length.
338
+ """
339
+ return [isinstance(k, slice) and not is_null_slice(k) for k in line]
340
+
341
+
342
+ # TODO: used only once in indexing; belongs elsewhere?
343
+ def is_full_slice(obj, line: int) -> bool:
344
+ """
345
+ We have a full length slice.
346
+ """
347
+ return (
348
+ isinstance(obj, slice)
349
+ and obj.start == 0
350
+ and obj.stop == line
351
+ and obj.step is None
352
+ )
353
+
354
+
355
+ def get_callable_name(obj):
356
+ # typical case has name
357
+ if hasattr(obj, "__name__"):
358
+ return getattr(obj, "__name__")
359
+ # some objects don't; could recurse
360
+ if isinstance(obj, partial):
361
+ return get_callable_name(obj.func)
362
+ # fall back to class name
363
+ if callable(obj):
364
+ return type(obj).__name__
365
+ # everything failed (probably because the argument
366
+ # wasn't actually callable); we return None
367
+ # instead of the empty string in this case to allow
368
+ # distinguishing between no name and a name of ''
369
+ return None
370
+
371
+
372
+ def apply_if_callable(maybe_callable, obj, **kwargs):
373
+ """
374
+ Evaluate possibly callable input using obj and kwargs if it is callable,
375
+ otherwise return as it is.
376
+
377
+ Parameters
378
+ ----------
379
+ maybe_callable : possibly a callable
380
+ obj : NDFrame
381
+ **kwargs
382
+ """
383
+ if callable(maybe_callable):
384
+ return maybe_callable(obj, **kwargs)
385
+
386
+ return maybe_callable
387
+
388
+
389
+ def standardize_mapping(into):
390
+ """
391
+ Helper function to standardize a supplied mapping.
392
+
393
+ Parameters
394
+ ----------
395
+ into : instance or subclass of collections.abc.Mapping
396
+ Must be a class, an initialized collections.defaultdict,
397
+ or an instance of a collections.abc.Mapping subclass.
398
+
399
+ Returns
400
+ -------
401
+ mapping : a collections.abc.Mapping subclass or other constructor
402
+ a callable object that can accept an iterator to create
403
+ the desired Mapping.
404
+
405
+ See Also
406
+ --------
407
+ DataFrame.to_dict
408
+ Series.to_dict
409
+ """
410
+ if not inspect.isclass(into):
411
+ if isinstance(into, defaultdict):
412
+ return partial(defaultdict, into.default_factory)
413
+ into = type(into)
414
+ if not issubclass(into, abc.Mapping):
415
+ raise TypeError(f"unsupported type: {into}")
416
+ if into == defaultdict:
417
+ raise TypeError("to_dict() only accepts initialized defaultdicts")
418
+ return into
419
+
420
+
421
+ @overload
422
+ def random_state(state: np.random.Generator) -> np.random.Generator:
423
+ ...
424
+
425
+
426
+ @overload
427
+ def random_state(
428
+ state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None,
429
+ ) -> np.random.RandomState:
430
+ ...
431
+
432
+
433
+ def random_state(state: RandomState | None = None):
434
+ """
435
+ Helper function for processing random_state arguments.
436
+
437
+ Parameters
438
+ ----------
439
+ state : int, array-like, BitGenerator, Generator, np.random.RandomState, None.
440
+ If receives an int, array-like, or BitGenerator, passes to
441
+ np.random.RandomState() as seed.
442
+ If receives an np.random RandomState or Generator, just returns that unchanged.
443
+ If receives `None`, returns np.random.
444
+ If receives anything else, raises an informative ValueError.
445
+
446
+ Default None.
447
+
448
+ Returns
449
+ -------
450
+ np.random.RandomState or np.random.Generator. If state is None, returns np.random
451
+
452
+ """
453
+ if is_integer(state) or isinstance(state, (np.ndarray, np.random.BitGenerator)):
454
+ return np.random.RandomState(state)
455
+ elif isinstance(state, np.random.RandomState):
456
+ return state
457
+ elif isinstance(state, np.random.Generator):
458
+ return state
459
+ elif state is None:
460
+ return np.random
461
+ else:
462
+ raise ValueError(
463
+ "random_state must be an integer, array-like, a BitGenerator, Generator, "
464
+ "a numpy RandomState, or None"
465
+ )
466
+
467
+
468
+ def pipe(
469
+ obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs
470
+ ) -> T:
471
+ """
472
+ Apply a function ``func`` to object ``obj`` either by passing obj as the
473
+ first argument to the function or, in the case that the func is a tuple,
474
+ interpret the first element of the tuple as a function and pass the obj to
475
+ that function as a keyword argument whose key is the value of the second
476
+ element of the tuple.
477
+
478
+ Parameters
479
+ ----------
480
+ func : callable or tuple of (callable, str)
481
+ Function to apply to this object or, alternatively, a
482
+ ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
483
+ string indicating the keyword of ``callable`` that expects the
484
+ object.
485
+ *args : iterable, optional
486
+ Positional arguments passed into ``func``.
487
+ **kwargs : dict, optional
488
+ A dictionary of keyword arguments passed into ``func``.
489
+
490
+ Returns
491
+ -------
492
+ object : the return type of ``func``.
493
+ """
494
+ if isinstance(func, tuple):
495
+ func, target = func
496
+ if target in kwargs:
497
+ msg = f"{target} is both the pipe target and a keyword argument"
498
+ raise ValueError(msg)
499
+ kwargs[target] = obj
500
+ return func(*args, **kwargs)
501
+ else:
502
+ return func(obj, *args, **kwargs)
503
+
504
+
505
+ def get_rename_function(mapper):
506
+ """
507
+ Returns a function that will map names/labels, depending on whether
508
+ mapper is a dict, a Series, or just a function.
509
+ """
510
+
511
+ def f(x):
512
+ if x in mapper:
513
+ return mapper[x]
514
+ else:
515
+ return x
516
+
517
+ return f if isinstance(mapper, (abc.Mapping, ABCSeries)) else mapper
518
+
519
+
520
+ def convert_to_list_like(
521
+ values: Hashable | Iterable | AnyArrayLike,
522
+ ) -> list | AnyArrayLike:
523
+ """
524
+ Convert list-like or scalar input to list-like. List, numpy and pandas array-like
525
+ inputs are returned unmodified whereas others are converted to list.
526
+ """
527
+ if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)):
528
+ return values
529
+ elif isinstance(values, abc.Iterable) and not isinstance(values, str):
530
+ return list(values)
531
+
532
+ return [values]
533
+
534
+
535
+ @contextlib.contextmanager
536
+ def temp_setattr(
537
+ obj, attr: str, value, condition: bool = True
538
+ ) -> Generator[None, None, None]:
539
+ """
540
+ Temporarily set attribute on an object.
541
+
542
+ Parameters
543
+ ----------
544
+ obj : object
545
+ Object whose attribute will be modified.
546
+ attr : str
547
+ Attribute to modify.
548
+ value : Any
549
+ Value to temporarily set attribute to.
550
+ condition : bool, default True
551
+ Whether to set the attribute. Provided in order to not have to
552
+ conditionally use this context manager.
553
+
554
+ Yields
555
+ ------
556
+ object : obj with modified attribute.
557
+ """
558
+ if condition:
559
+ old_value = getattr(obj, attr)
560
+ setattr(obj, attr, value)
561
+ try:
562
+ yield obj
563
+ finally:
564
+ if condition:
565
+ setattr(obj, attr, old_value)
566
+
567
+
568
+ def require_length_match(data, index: Index) -> None:
569
+ """
570
+ Check the length of data matches the length of the index.
571
+ """
572
+ if len(data) != len(index):
573
+ raise ValueError(
574
+ "Length of values "
575
+ f"({len(data)}) "
576
+ "does not match length of index "
577
+ f"({len(index)})"
578
+ )
579
+
580
+
581
+ # the ufuncs np.maximum.reduce and np.minimum.reduce default to axis=0,
582
+ # whereas np.min and np.max (which directly call obj.min and obj.max)
583
+ # default to axis=None.
584
+ _builtin_table = {
585
+ builtins.sum: np.sum,
586
+ builtins.max: np.maximum.reduce,
587
+ builtins.min: np.minimum.reduce,
588
+ }
589
+
590
+ # GH#53425: Only for deprecation
591
+ _builtin_table_alias = {
592
+ builtins.sum: "np.sum",
593
+ builtins.max: "np.maximum.reduce",
594
+ builtins.min: "np.minimum.reduce",
595
+ }
596
+
597
+ _cython_table = {
598
+ builtins.sum: "sum",
599
+ builtins.max: "max",
600
+ builtins.min: "min",
601
+ np.all: "all",
602
+ np.any: "any",
603
+ np.sum: "sum",
604
+ np.nansum: "sum",
605
+ np.mean: "mean",
606
+ np.nanmean: "mean",
607
+ np.prod: "prod",
608
+ np.nanprod: "prod",
609
+ np.std: "std",
610
+ np.nanstd: "std",
611
+ np.var: "var",
612
+ np.nanvar: "var",
613
+ np.median: "median",
614
+ np.nanmedian: "median",
615
+ np.max: "max",
616
+ np.nanmax: "max",
617
+ np.min: "min",
618
+ np.nanmin: "min",
619
+ np.cumprod: "cumprod",
620
+ np.nancumprod: "cumprod",
621
+ np.cumsum: "cumsum",
622
+ np.nancumsum: "cumsum",
623
+ }
624
+
625
+
626
+ def get_cython_func(arg: Callable) -> str | None:
627
+ """
628
+ if we define an internal function for this argument, return it
629
+ """
630
+ return _cython_table.get(arg)
631
+
632
+
633
+ def is_builtin_func(arg):
634
+ """
635
+ if we define a builtin function for this argument, return it,
636
+ otherwise return the arg
637
+ """
638
+ return _builtin_table.get(arg, arg)
639
+
640
+
641
+ def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:
642
+ """
643
+ If a name is missing then replace it by level_n, where n is the count
644
+
645
+ .. versionadded:: 1.4.0
646
+
647
+ Parameters
648
+ ----------
649
+ names : list-like
650
+ list of column names or None values.
651
+
652
+ Returns
653
+ -------
654
+ list
655
+ list of column names with the None values replaced.
656
+ """
657
+ return [f"level_{i}" if name is None else name for i, name in enumerate(names)]
venv/lib/python3.10/site-packages/pandas/core/config_init.py ADDED
@@ -0,0 +1,924 @@
1
+ """
2
+ This module is imported from the pandas package __init__.py file
3
+ in order to ensure that the core.config options registered here will
4
+ be available as soon as the user loads the package. If register_option
5
+ is invoked inside specific modules, they will not be registered until that
6
+ module is imported, which may or may not be a problem.
7
+
8
+ If you need to make sure options are available even before a certain
9
+ module is imported, register them here rather than in the module.
10
+
11
+ """
12
+ from __future__ import annotations
13
+
14
+ import os
15
+ from typing import Callable
16
+
17
+ import pandas._config.config as cf
18
+ from pandas._config.config import (
19
+ is_bool,
20
+ is_callable,
21
+ is_instance_factory,
22
+ is_int,
23
+ is_nonnegative_int,
24
+ is_one_of_factory,
25
+ is_str,
26
+ is_text,
27
+ )
28
+
29
+ # compute
30
+
31
+ use_bottleneck_doc = """
32
+ : bool
33
+ Use the bottleneck library to accelerate computation if it is installed,
34
+ the default is True
35
+ Valid values: False,True
36
+ """
37
+
38
+
39
+ def use_bottleneck_cb(key) -> None:
40
+ from pandas.core import nanops
41
+
42
+ nanops.set_use_bottleneck(cf.get_option(key))
43
+
44
+
45
+ use_numexpr_doc = """
46
+ : bool
47
+ Use the numexpr library to accelerate computation if it is installed,
48
+ the default is True
49
+ Valid values: False,True
50
+ """
51
+
52
+
53
+ def use_numexpr_cb(key) -> None:
54
+ from pandas.core.computation import expressions
55
+
56
+ expressions.set_use_numexpr(cf.get_option(key))
57
+
58
+
59
+ use_numba_doc = """
60
+ : bool
61
+ Use the numba engine option for select operations if it is installed,
62
+ the default is False
63
+ Valid values: False,True
64
+ """
65
+
66
+
67
+ def use_numba_cb(key) -> None:
68
+ from pandas.core.util import numba_
69
+
70
+ numba_.set_use_numba(cf.get_option(key))
71
+
72
+
73
+ with cf.config_prefix("compute"):
74
+ cf.register_option(
75
+ "use_bottleneck",
76
+ True,
77
+ use_bottleneck_doc,
78
+ validator=is_bool,
79
+ cb=use_bottleneck_cb,
80
+ )
81
+ cf.register_option(
82
+ "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb
83
+ )
84
+ cf.register_option(
85
+ "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb
86
+ )
87
+ #
88
+ # options from the "display" namespace
89
+
90
+ pc_precision_doc = """
91
+ : int
92
+ Floating point output precision in terms of number of places after the
93
+ decimal, for regular formatting as well as scientific notation. Similar
94
+ to ``precision`` in :meth:`numpy.set_printoptions`.
95
+ """
96
+
97
+ pc_colspace_doc = """
98
+ : int
99
+ Default space for DataFrame columns.
100
+ """
101
+
102
+ pc_max_rows_doc = """
103
+ : int
104
+ If max_rows is exceeded, switch to truncate view. Depending on
105
+ `large_repr`, objects are either centrally truncated or printed as
106
+ a summary view. 'None' value means unlimited.
107
+
108
+ In case python/IPython is running in a terminal and `large_repr`
109
+ equals 'truncate' this can be set to 0 and pandas will auto-detect
110
+ the height of the terminal and print a truncated object which fits
111
+ the screen height. The IPython notebook, IPython qtconsole, or
112
+ IDLE do not run in a terminal and hence it is not possible to do
113
+ correct auto-detection.
114
+ """
115
+
116
+ pc_min_rows_doc = """
117
+ : int
118
+ The numbers of rows to show in a truncated view (when `max_rows` is
119
+ exceeded). Ignored when `max_rows` is set to None or 0. When set to
120
+ None, follows the value of `max_rows`.
121
+ """
122
+
123
+ pc_max_cols_doc = """
124
+ : int
125
+ If max_cols is exceeded, switch to truncate view. Depending on
126
+ `large_repr`, objects are either centrally truncated or printed as
127
+ a summary view. 'None' value means unlimited.
128
+
129
+ In case python/IPython is running in a terminal and `large_repr`
130
+ equals 'truncate' this can be set to 0 or None and pandas will auto-detect
131
+ the width of the terminal and print a truncated object which fits
132
+ the screen width. The IPython notebook, IPython qtconsole, or IDLE
133
+ do not run in a terminal and hence it is not possible to do
134
+ correct auto-detection and defaults to 20.
135
+ """
136
+
137
+ pc_max_categories_doc = """
138
+ : int
139
+ This sets the maximum number of categories pandas should output when
140
+ printing out a `Categorical` or a Series of dtype "category".
141
+ """
142
+
143
+ pc_max_info_cols_doc = """
144
+ : int
145
+ max_info_columns is used in DataFrame.info method to decide if
146
+ per column information will be printed.
147
+ """
148
+
149
+ pc_nb_repr_h_doc = """
150
+ : boolean
151
+ When True, IPython notebook will use html representation for
152
+ pandas objects (if it is available).
153
+ """
154
+
155
+ pc_pprint_nest_depth = """
156
+ : int
157
+ Controls the number of nested levels to process when pretty-printing
158
+ """
159
+
160
+ pc_multi_sparse_doc = """
161
+ : boolean
162
+ "sparsify" MultiIndex display (don't display repeated
163
+ elements in outer levels within groups)
164
+ """
165
+
166
+ float_format_doc = """
167
+ : callable
168
+ The callable should accept a floating point number and return
169
+ a string with the desired format of the number. This is used
170
+ in some places like SeriesFormatter.
171
+ See formats.format.EngFormatter for an example.
172
+ """
173
+
174
+ max_colwidth_doc = """
175
+ : int or None
176
+ The maximum width in characters of a column in the repr of
177
+ a pandas data structure. When the column overflows, a "..."
178
+ placeholder is embedded in the output. A 'None' value means unlimited.
179
+ """
180
+
181
+ colheader_justify_doc = """
182
+ : 'left'/'right'
183
+ Controls the justification of column headers. Used by DataFrameFormatter.
184
+ """
185
+
186
+ pc_expand_repr_doc = """
187
+ : boolean
188
+ Whether to print out the full DataFrame repr for wide DataFrames across
189
+ multiple lines, `max_columns` is still respected, but the output will
190
+ wrap-around across multiple "pages" if its width exceeds `display.width`.
191
+ """
192
+
193
+ pc_show_dimensions_doc = """
194
+ : boolean or 'truncate'
195
+ Whether to print out dimensions at the end of DataFrame repr.
196
+ If 'truncate' is specified, only print out the dimensions if the
197
+ frame is truncated (e.g. not display all rows and/or columns)
198
+ """
199
+
200
+ pc_east_asian_width_doc = """
201
+ : boolean
202
+ Whether to use the Unicode East Asian Width to calculate the display text
203
+ width.
204
+ Enabling this may affect performance (default: False)
205
+ """
206
+
207
+ pc_ambiguous_as_wide_doc = """
208
+ : boolean
209
+ Whether to handle Unicode characters belonging to the Ambiguous category as Wide (width=2)
210
+ (default: False)
211
+ """
212
+
213
+ pc_table_schema_doc = """
214
+ : boolean
215
+ Whether to publish a Table Schema representation for frontends
216
+ that support it.
217
+ (default: False)
218
+ """
219
+
220
+ pc_html_border_doc = """
221
+ : int
222
+ A ``border=value`` attribute is inserted in the ``<table>`` tag
223
+ for the DataFrame HTML repr.
224
+ """
225
+
226
+ pc_html_use_mathjax_doc = """\
227
+ : boolean
228
+ When True, Jupyter notebook will process table contents using MathJax,
229
+ rendering mathematical expressions enclosed by the dollar symbol.
230
+ (default: True)
231
+ """
232
+
233
+ pc_max_dir_items = """\
234
+ : int
235
+ The number of items that will be added to `dir(...)`. 'None' value means
236
+ unlimited. Because dir is cached, changing this option will not immediately
237
+ affect already existing dataframes until a column is deleted or added.
238
+
239
+ This is for instance used to suggest columns from a dataframe to tab
240
+ completion.
241
+ """
242
+
243
+ pc_width_doc = """
244
+ : int
245
+ Width of the display in characters. In case python/IPython is running in
246
+ a terminal this can be set to None and pandas will correctly auto-detect
247
+ the width.
248
+ Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
249
+ terminal and hence it is not possible to correctly detect the width.
250
+ """
251
+
252
+ pc_chop_threshold_doc = """
253
+ : float or None
254
+ if set to a float value, all float values smaller than the given threshold
255
+ will be displayed as exactly 0 by repr and friends.
256
+ """
257
+
258
+ pc_max_seq_items = """
259
+ : int or None
260
+ When pretty-printing a long sequence, no more than `max_seq_items`
261
+ will be printed. If items are omitted, they will be denoted by the
262
+ addition of "..." to the resulting string.
263
+
264
+ If set to None, the number of items to be printed is unlimited.
265
+ """
266
+
267
+ pc_max_info_rows_doc = """
268
+ : int
269
+ df.info() will usually show null-counts for each column.
270
+ For large frames this can be quite slow. max_info_rows and max_info_cols
271
+ limit this null check only to frames with smaller dimensions than
272
+ specified.
273
+ """
274
+
275
+ pc_large_repr_doc = """
276
+ : 'truncate'/'info'
277
+ For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
278
+ show a truncated table, or switch to the view from
279
+ df.info() (the behaviour in earlier versions of pandas).
280
+ """
281
+
282
+ pc_memory_usage_doc = """
283
+ : bool, string or None
284
+ This specifies if the memory usage of a DataFrame should be displayed when
285
+ df.info() is called. Valid values True,False,'deep'
286
+ """
287
+
288
+
289
+ def table_schema_cb(key) -> None:
290
+ from pandas.io.formats.printing import enable_data_resource_formatter
291
+
292
+ enable_data_resource_formatter(cf.get_option(key))
293
+
294
+
295
+ def is_terminal() -> bool:
296
+ """
297
+ Detect if Python is running in a terminal.
298
+
299
+ Returns True if Python is running in a terminal or False if not.
300
+ """
301
+ try:
302
+ # error: Name 'get_ipython' is not defined
303
+ ip = get_ipython() # type: ignore[name-defined]
304
+ except NameError: # assume standard Python interpreter in a terminal
305
+ return True
306
+ else:
307
+ if hasattr(ip, "kernel"): # IPython as a Jupyter kernel
308
+ return False
309
+ else: # IPython in a terminal
310
+ return True
311
+
312
+
313
+ with cf.config_prefix("display"):
314
+ cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int)
315
+ cf.register_option(
316
+ "float_format",
317
+ None,
318
+ float_format_doc,
319
+ validator=is_one_of_factory([None, is_callable]),
320
+ )
321
+ cf.register_option(
322
+ "max_info_rows",
323
+ 1690785,
324
+ pc_max_info_rows_doc,
325
+ validator=is_int,
326
+ )
327
+ cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int)
328
+ cf.register_option(
329
+ "min_rows",
330
+ 10,
331
+ pc_min_rows_doc,
332
+ validator=is_instance_factory([type(None), int]),
333
+ )
334
+ cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)
335
+
336
+ cf.register_option(
337
+ "max_colwidth",
338
+ 50,
339
+ max_colwidth_doc,
340
+ validator=is_nonnegative_int,
341
+ )
342
+ if is_terminal():
343
+ max_cols = 0 # automatically determine optimal number of columns
344
+ else:
345
+ max_cols = 20 # cannot determine optimal number of columns
346
+ cf.register_option(
347
+ "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int
348
+ )
349
+ cf.register_option(
350
+ "large_repr",
351
+ "truncate",
352
+ pc_large_repr_doc,
353
+ validator=is_one_of_factory(["truncate", "info"]),
354
+ )
355
+ cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int)
356
+ cf.register_option(
357
+ "colheader_justify", "right", colheader_justify_doc, validator=is_text
358
+ )
359
+ cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool)
360
+ cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int)
361
+ cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool)
362
+ cf.register_option("expand_frame_repr", True, pc_expand_repr_doc)
363
+ cf.register_option(
364
+ "show_dimensions",
365
+ "truncate",
366
+ pc_show_dimensions_doc,
367
+ validator=is_one_of_factory([True, False, "truncate"]),
368
+ )
369
+ cf.register_option("chop_threshold", None, pc_chop_threshold_doc)
370
+ cf.register_option("max_seq_items", 100, pc_max_seq_items)
371
+ cf.register_option(
372
+ "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int])
373
+ )
374
+ cf.register_option(
375
+ "memory_usage",
376
+ True,
377
+ pc_memory_usage_doc,
378
+ validator=is_one_of_factory([None, True, False, "deep"]),
379
+ )
380
+ cf.register_option(
381
+ "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool
382
+ )
383
+ cf.register_option(
384
+ "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool
385
+ )
386
+ cf.register_option(
387
+ "html.table_schema",
388
+ False,
389
+ pc_table_schema_doc,
390
+ validator=is_bool,
391
+ cb=table_schema_cb,
392
+ )
393
+ cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int)
394
+ cf.register_option(
395
+ "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool
396
+ )
397
+ cf.register_option(
398
+ "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int
399
+ )
400
+
401
+ tc_sim_interactive_doc = """
402
+ : boolean
403
+ Whether to simulate interactive mode for purposes of testing
404
+ """
405
+
406
+ with cf.config_prefix("mode"):
407
+ cf.register_option("sim_interactive", False, tc_sim_interactive_doc)
408
+
409
+ use_inf_as_na_doc = """
410
+ : boolean
411
+ True means treat None, NaN, INF, -INF as NA (old way),
412
+ False means None and NaN are null, but INF, -INF are not NA
413
+ (new way).
414
+
415
+ This option is deprecated in pandas 2.1.0 and will be removed in 3.0.
416
+ """
417
+
418
+ # We don't want to start importing everything at the global context level
419
+ # or we'll hit circular deps.
420
+
421
+
422
+ def use_inf_as_na_cb(key) -> None:
423
+ # TODO(3.0): enforcing this deprecation will close GH#52501
424
+ from pandas.core.dtypes.missing import _use_inf_as_na
425
+
426
+ _use_inf_as_na(key)
427
+
428
+
429
+ with cf.config_prefix("mode"):
430
+ cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb)
431
+
432
+ cf.deprecate_option(
433
+ # GH#51684
434
+ "mode.use_inf_as_na",
435
+ "use_inf_as_na option is deprecated and will be removed in a future "
436
+ "version. Convert inf values to NaN before operating instead.",
437
+ )
438
+
439
+ data_manager_doc = """
440
+ : string
441
+ Internal data manager type; can be "block" or "array". Defaults to "block",
442
+ unless overridden by the 'PANDAS_DATA_MANAGER' environment variable (needs
443
+ to be set before pandas is imported).
444
+ """
445
+
446
+
447
+ with cf.config_prefix("mode"):
448
+ cf.register_option(
449
+ "data_manager",
450
+ # Get the default from an environment variable, if set, otherwise defaults
451
+ # to "block". This environment variable can be set for testing.
452
+ os.environ.get("PANDAS_DATA_MANAGER", "block"),
453
+ data_manager_doc,
454
+ validator=is_one_of_factory(["block", "array"]),
455
+ )
456
+
457
+ cf.deprecate_option(
458
+ # GH#55043
459
+ "mode.data_manager",
460
+ "data_manager option is deprecated and will be removed in a future "
461
+ "version. Only the BlockManager will be available.",
462
+ )
463
+
464
+
465
+ # TODO better name?
466
+ copy_on_write_doc = """
467
+ : bool
468
+ Use new copy-view behaviour using Copy-on-Write. Defaults to False,
469
+ unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable
470
+ (if set to "1" for True, needs to be set before pandas is imported).
471
+ """
472
+
473
+
474
+ with cf.config_prefix("mode"):
475
+ cf.register_option(
476
+ "copy_on_write",
477
+ # Get the default from an environment variable, if set, otherwise defaults
478
+ # to False. This environment variable can be set for testing.
479
+ "warn"
480
+ if os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "warn"
481
+ else os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1",
482
+ copy_on_write_doc,
483
+ validator=is_one_of_factory([True, False, "warn"]),
484
+ )
485
+
486
+
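The Copy-on-Write default above is derived from the PANDAS_COPY_ON_WRITE environment variable at import time, and the registered option can also be toggled at runtime through the public option API. A brief sketch (standard pandas calls; the values are illustrative):

import os

# Must be set before pandas is imported for the default above to pick it up.
os.environ["PANDAS_COPY_ON_WRITE"] = "1"

import pandas as pd

# Equivalent runtime toggle of the option registered above.
pd.set_option("mode.copy_on_write", True)
print(pd.get_option("mode.copy_on_write"))   # True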
487
+ # user warnings
488
+ chained_assignment = """
489
+ : string
490
+ Raise an exception, warn, or take no action if trying to use chained assignment.
491
+ The default is warn
492
+ """
493
+
494
+ with cf.config_prefix("mode"):
495
+ cf.register_option(
496
+ "chained_assignment",
497
+ "warn",
498
+ chained_assignment,
499
+ validator=is_one_of_factory([None, "warn", "raise"]),
500
+ )
501
+
502
+
503
+ string_storage_doc = """
504
+ : string
505
+ The default storage for StringDtype. This option is ignored if
506
+ ``future.infer_string`` is set to True.
507
+ """
508
+
509
+ with cf.config_prefix("mode"):
510
+ cf.register_option(
511
+ "string_storage",
512
+ "python",
513
+ string_storage_doc,
514
+ validator=is_one_of_factory(["python", "pyarrow", "pyarrow_numpy"]),
515
+ )
516
+
517
+
518
+ # Set up the io.excel specific reader configuration.
519
+ reader_engine_doc = """
520
+ : string
521
+ The default Excel reader engine for '{ext}' files. Available options:
522
+ auto, {others}.
523
+ """
524
+
525
+ _xls_options = ["xlrd", "calamine"]
526
+ _xlsm_options = ["xlrd", "openpyxl", "calamine"]
527
+ _xlsx_options = ["xlrd", "openpyxl", "calamine"]
528
+ _ods_options = ["odf", "calamine"]
529
+ _xlsb_options = ["pyxlsb", "calamine"]
530
+
531
+
532
+ with cf.config_prefix("io.excel.xls"):
533
+ cf.register_option(
534
+ "reader",
535
+ "auto",
536
+ reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)),
537
+ validator=is_one_of_factory(_xls_options + ["auto"]),
538
+ )
539
+
540
+ with cf.config_prefix("io.excel.xlsm"):
541
+ cf.register_option(
542
+ "reader",
543
+ "auto",
544
+ reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
545
+ validator=is_one_of_factory(_xlsm_options + ["auto"]),
546
+ )
547
+
548
+
549
+ with cf.config_prefix("io.excel.xlsx"):
550
+ cf.register_option(
551
+ "reader",
552
+ "auto",
553
+ reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
554
+ validator=is_one_of_factory(_xlsx_options + ["auto"]),
555
+ )
556
+
557
+
558
+ with cf.config_prefix("io.excel.ods"):
559
+ cf.register_option(
560
+ "reader",
561
+ "auto",
562
+ reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
563
+ validator=is_one_of_factory(_ods_options + ["auto"]),
564
+ )
565
+
566
+ with cf.config_prefix("io.excel.xlsb"):
567
+ cf.register_option(
568
+ "reader",
569
+ "auto",
570
+ reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)),
571
+ validator=is_one_of_factory(_xlsb_options + ["auto"]),
572
+ )
573
+
574
+ # Set up the io.excel specific writer configuration.
575
+ writer_engine_doc = """
576
+ : string
577
+ The default Excel writer engine for '{ext}' files. Available options:
578
+ auto, {others}.
579
+ """
580
+
581
+ _xlsm_options = ["openpyxl"]
582
+ _xlsx_options = ["openpyxl", "xlsxwriter"]
583
+ _ods_options = ["odf"]
584
+
585
+
586
+ with cf.config_prefix("io.excel.xlsm"):
587
+ cf.register_option(
588
+ "writer",
589
+ "auto",
590
+ writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
591
+ validator=str,
592
+ )
593
+
594
+
595
+ with cf.config_prefix("io.excel.xlsx"):
596
+ cf.register_option(
597
+ "writer",
598
+ "auto",
599
+ writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
600
+ validator=str,
601
+ )
602
+
603
+
604
+ with cf.config_prefix("io.excel.ods"):
605
+ cf.register_option(
606
+ "writer",
607
+ "auto",
608
+ writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
609
+ validator=str,
610
+ )
611
+
612
+
613
+ # Set up the io.parquet specific configuration.
614
+ parquet_engine_doc = """
615
+ : string
616
+ The default parquet reader/writer engine. Available options:
617
+ 'auto', 'pyarrow', 'fastparquet', the default is 'auto'
618
+ """
619
+
620
+ with cf.config_prefix("io.parquet"):
621
+ cf.register_option(
622
+ "engine",
623
+ "auto",
624
+ parquet_engine_doc,
625
+ validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]),
626
+ )
627
+
628
+
629
+ # Set up the io.sql specific configuration.
630
+ sql_engine_doc = """
631
+ : string
632
+ The default sql reader/writer engine. Available options:
633
+ 'auto', 'sqlalchemy', the default is 'auto'
634
+ """
635
+
636
+ with cf.config_prefix("io.sql"):
637
+ cf.register_option(
638
+ "engine",
639
+ "auto",
640
+ sql_engine_doc,
641
+ validator=is_one_of_factory(["auto", "sqlalchemy"]),
642
+ )
643
+
644
+ # --------
645
+ # Plotting
646
+ # ---------
647
+
648
+ plotting_backend_doc = """
649
+ : str
650
+ The plotting backend to use. The default value is "matplotlib", the
651
+ backend provided with pandas. Other backends can be specified by
652
+ providing the name of the module that implements the backend.
653
+ """
654
+
655
+
656
+ def register_plotting_backend_cb(key) -> None:
657
+ if key == "matplotlib":
658
+ # We defer matplotlib validation, since it's the default
659
+ return
660
+ from pandas.plotting._core import _get_plot_backend
661
+
662
+ _get_plot_backend(key)
663
+
664
+
665
+ with cf.config_prefix("plotting"):
666
+ cf.register_option(
667
+ "backend",
668
+ defval="matplotlib",
669
+ doc=plotting_backend_doc,
670
+ validator=register_plotting_backend_cb,
671
+ )
672
+
673
+
674
+ register_converter_doc = """
675
+ : bool or 'auto'.
676
+ Whether to register converters with matplotlib's units registry for
677
+ dates, times, datetimes, and Periods. Toggling to False will remove
678
+ the converters, restoring any converters that pandas overwrote.
679
+ """
680
+
681
+
682
+ def register_converter_cb(key) -> None:
683
+ from pandas.plotting import (
684
+ deregister_matplotlib_converters,
685
+ register_matplotlib_converters,
686
+ )
687
+
688
+ if cf.get_option(key):
689
+ register_matplotlib_converters()
690
+ else:
691
+ deregister_matplotlib_converters()
692
+
693
+
694
+ with cf.config_prefix("plotting.matplotlib"):
695
+ cf.register_option(
696
+ "register_converters",
697
+ "auto",
698
+ register_converter_doc,
699
+ validator=is_one_of_factory(["auto", True, False]),
700
+ cb=register_converter_cb,
701
+ )
702
+
703
+ # ------
704
+ # Styler
705
+ # ------
706
+
707
+ styler_sparse_index_doc = """
708
+ : bool
709
+ Whether to sparsify the display of a hierarchical index. Setting to False will
710
+ display each explicit level element in a hierarchical key for each row.
711
+ """
712
+
713
+ styler_sparse_columns_doc = """
714
+ : bool
715
+ Whether to sparsify the display of hierarchical columns. Setting to False will
716
+ display each explicit level element in a hierarchical key for each column.
717
+ """
718
+
719
+ styler_render_repr = """
720
+ : str
721
+ Determine which output to use in Jupyter Notebook in {"html", "latex"}.
722
+ """
723
+
724
+ styler_max_elements = """
725
+ : int
726
+ The maximum number of data-cell (<td>) elements that will be rendered before
727
+ trimming will occur over columns, rows or both if needed.
728
+ """
729
+
730
+ styler_max_rows = """
731
+ : int, optional
732
+ The maximum number of rows that will be rendered. May still be reduced to
733
+ satisfy ``max_elements``, which takes precedence.
734
+ """
735
+
736
+ styler_max_columns = """
737
+ : int, optional
738
+ The maximum number of columns that will be rendered. May still be reduced to
739
+ satisfy ``max_elements``, which takes precedence.
740
+ """
741
+
742
+ styler_precision = """
743
+ : int
744
+ The precision for floats and complex numbers.
745
+ """
746
+
747
+ styler_decimal = """
748
+ : str
749
+ The character representation for the decimal separator for floats and complex.
750
+ """
751
+
752
+ styler_thousands = """
753
+ : str, optional
754
+ The character representation for thousands separator for floats, int and complex.
755
+ """
756
+
757
+ styler_na_rep = """
758
+ : str, optional
759
+ The string representation for values identified as missing.
760
+ """
761
+
762
+ styler_escape = """
763
+ : str, optional
764
+ Whether to escape certain characters according to the given context; html or latex.
765
+ """
766
+
767
+ styler_formatter = """
768
+ : str, callable, dict, optional
769
+ A formatter object to be used as default within ``Styler.format``.
770
+ """
771
+
772
+ styler_multirow_align = """
773
+ : {"c", "t", "b"}
774
+ The specifier for vertical alignment of sparsified LaTeX multirows.
775
+ """
776
+
777
+ styler_multicol_align = r"""
778
+ : {"r", "c", "l", "naive-l", "naive-r"}
779
+ The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe
780
+ decorators can also be added to non-naive values to draw vertical
781
+ rules, e.g. "\|r" will draw a rule on the left side of right aligned merged cells.
782
+ """
783
+
784
+ styler_hrules = """
785
+ : bool
786
+ Whether to add horizontal rules on top and bottom and below the headers.
787
+ """
788
+
789
+ styler_environment = """
790
+ : str
791
+ The environment to replace ``\\begin{table}``. If "longtable" is used results
792
+ in a specific longtable environment format.
793
+ """
794
+
795
+ styler_encoding = """
796
+ : str
797
+ The encoding used for output HTML and LaTeX files.
798
+ """
799
+
800
+ styler_mathjax = """
801
+ : bool
802
+ If False, will render special CSS classes to table attributes that indicate MathJax
803
+ will not be used in Jupyter Notebook.
804
+ """
805
+
806
+ with cf.config_prefix("styler"):
807
+ cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool)
808
+
809
+ cf.register_option(
810
+ "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool
811
+ )
812
+
813
+ cf.register_option(
814
+ "render.repr",
815
+ "html",
816
+ styler_render_repr,
817
+ validator=is_one_of_factory(["html", "latex"]),
818
+ )
819
+
820
+ cf.register_option(
821
+ "render.max_elements",
822
+ 2**18,
823
+ styler_max_elements,
824
+ validator=is_nonnegative_int,
825
+ )
826
+
827
+ cf.register_option(
828
+ "render.max_rows",
829
+ None,
830
+ styler_max_rows,
831
+ validator=is_nonnegative_int,
832
+ )
833
+
834
+ cf.register_option(
835
+ "render.max_columns",
836
+ None,
837
+ styler_max_columns,
838
+ validator=is_nonnegative_int,
839
+ )
840
+
841
+ cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str)
842
+
843
+ cf.register_option("format.decimal", ".", styler_decimal, validator=is_str)
844
+
845
+ cf.register_option(
846
+ "format.precision", 6, styler_precision, validator=is_nonnegative_int
847
+ )
848
+
849
+ cf.register_option(
850
+ "format.thousands",
851
+ None,
852
+ styler_thousands,
853
+ validator=is_instance_factory([type(None), str]),
854
+ )
855
+
856
+ cf.register_option(
857
+ "format.na_rep",
858
+ None,
859
+ styler_na_rep,
860
+ validator=is_instance_factory([type(None), str]),
861
+ )
862
+
863
+ cf.register_option(
864
+ "format.escape",
865
+ None,
866
+ styler_escape,
867
+ validator=is_one_of_factory([None, "html", "latex", "latex-math"]),
868
+ )
869
+
870
+ cf.register_option(
871
+ "format.formatter",
872
+ None,
873
+ styler_formatter,
874
+ validator=is_instance_factory([type(None), dict, Callable, str]),
875
+ )
876
+
877
+ cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool)
878
+
879
+ cf.register_option(
880
+ "latex.multirow_align",
881
+ "c",
882
+ styler_multirow_align,
883
+ validator=is_one_of_factory(["c", "t", "b", "naive"]),
884
+ )
885
+
886
+ val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"]
887
+ val_mca += ["naive-l", "naive-r"]
888
+ cf.register_option(
889
+ "latex.multicol_align",
890
+ "r",
891
+ styler_multicol_align,
892
+ validator=is_one_of_factory(val_mca),
893
+ )
894
+
895
+ cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool)
896
+
897
+ cf.register_option(
898
+ "latex.environment",
899
+ None,
900
+ styler_environment,
901
+ validator=is_instance_factory([type(None), str]),
902
+ )
903
+
904
+
905
+ with cf.config_prefix("future"):
906
+ cf.register_option(
907
+ "infer_string",
908
+ False,
909
+ "Whether to infer sequence of str objects as pyarrow string "
910
+ "dtype, which will be the default in pandas 3.0 "
911
+ "(at which point this option will be deprecated).",
912
+ validator=is_one_of_factory([True, False]),
913
+ )
914
+
915
+ cf.register_option(
916
+ "no_silent_downcasting",
917
+ False,
918
+ "Whether to opt-in to the future behavior which will *not* silently "
919
+ "downcast results from Series and DataFrame `where`, `mask`, and `clip` "
920
+ "methods. "
921
+ "Silent downcasting will be removed in pandas 3.0 "
922
+ "(at which point this option will be deprecated).",
923
+ validator=is_one_of_factory([True, False]),
924
+ )
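All of the options registered in this module are consumed through the public option API. A short usage example (standard pandas calls only, shown against options defined above):

import pandas as pd

# Inspect an option registered above, including the docstring attached to it.
pd.describe_option("display.max_rows")

# Change it globally ...
pd.set_option("display.max_rows", 20)

# ... or only inside a block; the previous values are restored on exit.
with pd.option_context("display.max_rows", 5, "display.precision", 2):
    print(pd.get_option("display.max_rows"))   # 5

print(pd.get_option("display.max_rows"))       # 20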
venv/lib/python3.10/site-packages/pandas/core/construction.py ADDED
@@ -0,0 +1,824 @@
1
+ """
2
+ Constructor functions intended to be shared by pd.array, Series.__init__,
3
+ and Index.__new__.
4
+
5
+ These should not depend on core.internals.
6
+ """
7
+ from __future__ import annotations
8
+
9
+ from collections.abc import Sequence
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Optional,
13
+ Union,
14
+ cast,
15
+ overload,
16
+ )
17
+ import warnings
18
+
19
+ import numpy as np
20
+ from numpy import ma
21
+
22
+ from pandas._config import using_pyarrow_string_dtype
23
+
24
+ from pandas._libs import lib
25
+ from pandas._libs.tslibs import (
26
+ Period,
27
+ get_supported_dtype,
28
+ is_supported_dtype,
29
+ )
30
+ from pandas._typing import (
31
+ AnyArrayLike,
32
+ ArrayLike,
33
+ Dtype,
34
+ DtypeObj,
35
+ T,
36
+ )
37
+ from pandas.util._exceptions import find_stack_level
38
+
39
+ from pandas.core.dtypes.base import ExtensionDtype
40
+ from pandas.core.dtypes.cast import (
41
+ construct_1d_arraylike_from_scalar,
42
+ construct_1d_object_array_from_listlike,
43
+ maybe_cast_to_datetime,
44
+ maybe_cast_to_integer_array,
45
+ maybe_convert_platform,
46
+ maybe_infer_to_datetimelike,
47
+ maybe_promote,
48
+ )
49
+ from pandas.core.dtypes.common import (
50
+ is_list_like,
51
+ is_object_dtype,
52
+ is_string_dtype,
53
+ pandas_dtype,
54
+ )
55
+ from pandas.core.dtypes.dtypes import NumpyEADtype
56
+ from pandas.core.dtypes.generic import (
57
+ ABCDataFrame,
58
+ ABCExtensionArray,
59
+ ABCIndex,
60
+ ABCSeries,
61
+ )
62
+ from pandas.core.dtypes.missing import isna
63
+
64
+ import pandas.core.common as com
65
+
66
+ if TYPE_CHECKING:
67
+ from pandas import (
68
+ Index,
69
+ Series,
70
+ )
71
+ from pandas.core.arrays.base import ExtensionArray
72
+
73
+
74
+ def array(
75
+ data: Sequence[object] | AnyArrayLike,
76
+ dtype: Dtype | None = None,
77
+ copy: bool = True,
78
+ ) -> ExtensionArray:
79
+ """
80
+ Create an array.
81
+
82
+ Parameters
83
+ ----------
84
+ data : Sequence of objects
85
+ The scalars inside `data` should be instances of the
86
+ scalar type for `dtype`. It's expected that `data`
87
+ represents a 1-dimensional array of data.
88
+
89
+ When `data` is an Index or Series, the underlying array
90
+ will be extracted from `data`.
91
+
92
+ dtype : str, np.dtype, or ExtensionDtype, optional
93
+ The dtype to use for the array. This may be a NumPy
94
+ dtype or an extension type registered with pandas using
95
+ :meth:`pandas.api.extensions.register_extension_dtype`.
96
+
97
+ If not specified, there are two possibilities:
98
+
99
+ 1. When `data` is a :class:`Series`, :class:`Index`, or
100
+ :class:`ExtensionArray`, the `dtype` will be taken
101
+ from the data.
102
+ 2. Otherwise, pandas will attempt to infer the `dtype`
103
+ from the data.
104
+
105
+ Note that when `data` is a NumPy array, ``data.dtype`` is
106
+ *not* used for inferring the array type. This is because
107
+ NumPy cannot represent all the types of data that can be
108
+ held in extension arrays.
109
+
110
+ Currently, pandas will infer an extension dtype for sequences of
111
+
112
+ ============================== =======================================
113
+ Scalar Type Array Type
114
+ ============================== =======================================
115
+ :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
116
+ :class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
117
+ :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
118
+ :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
119
+ :class:`int` :class:`pandas.arrays.IntegerArray`
120
+ :class:`float` :class:`pandas.arrays.FloatingArray`
121
+ :class:`str` :class:`pandas.arrays.StringArray` or
122
+ :class:`pandas.arrays.ArrowStringArray`
123
+ :class:`bool` :class:`pandas.arrays.BooleanArray`
124
+ ============================== =======================================
125
+
126
+ The ExtensionArray created when the scalar type is :class:`str` is determined by
127
+ ``pd.options.mode.string_storage`` if the dtype is not explicitly given.
128
+
129
+ For all other cases, NumPy's usual inference rules will be used.
130
+ copy : bool, default True
131
+ Whether to copy the data, even if not necessary. Depending
132
+ on the type of `data`, creating the new array may require
133
+ copying data, even if ``copy=False``.
134
+
135
+ Returns
136
+ -------
137
+ ExtensionArray
138
+ The newly created array.
139
+
140
+ Raises
141
+ ------
142
+ ValueError
143
+ When `data` is not 1-dimensional.
144
+
145
+ See Also
146
+ --------
147
+ numpy.array : Construct a NumPy array.
148
+ Series : Construct a pandas Series.
149
+ Index : Construct a pandas Index.
150
+ arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array.
151
+ Series.array : Extract the array stored within a Series.
152
+
153
+ Notes
154
+ -----
155
+ Omitting the `dtype` argument means pandas will attempt to infer the
156
+ best array type from the values in the data. As new array types are
157
+ added by pandas and 3rd party libraries, the "best" array type may
158
+ change. We recommend specifying `dtype` to ensure that
159
+
160
+ 1. the correct array type for the data is returned
161
+ 2. the returned array type doesn't change as new extension types
162
+ are added by pandas and third-party libraries
163
+
164
+ Additionally, if the underlying memory representation of the returned
165
+ array matters, we recommend specifying the `dtype` as a concrete object
166
+ rather than a string alias or allowing it to be inferred. For example,
167
+ a future version of pandas or a 3rd-party library may include a
168
+ dedicated ExtensionArray for string data. In this event, the following
169
+ would no longer return a :class:`arrays.NumpyExtensionArray` backed by a
170
+ NumPy array.
171
+
172
+ >>> pd.array(['a', 'b'], dtype=str)
173
+ <NumpyExtensionArray>
174
+ ['a', 'b']
175
+ Length: 2, dtype: str32
176
+
177
+ This would instead return the new ExtensionArray dedicated for string
178
+ data. If you really need the new array to be backed by a NumPy array,
179
+ specify that in the dtype.
180
+
181
+ >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
182
+ <NumpyExtensionArray>
183
+ ['a', 'b']
184
+ Length: 2, dtype: str32
185
+
186
+ Finally, Pandas has arrays that mostly overlap with NumPy
187
+
188
+ * :class:`arrays.DatetimeArray`
189
+ * :class:`arrays.TimedeltaArray`
190
+
191
+ When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
192
+ passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
193
+ rather than a ``NumpyExtensionArray``. This is for symmetry with the case of
194
+ timezone-aware data, which NumPy does not natively support.
195
+
196
+ >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
197
+ <DatetimeArray>
198
+ ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
199
+ Length: 2, dtype: datetime64[ns]
200
+
201
+ >>> pd.array(["1h", "2h"], dtype='timedelta64[ns]')
202
+ <TimedeltaArray>
203
+ ['0 days 01:00:00', '0 days 02:00:00']
204
+ Length: 2, dtype: timedelta64[ns]
205
+
206
+ Examples
207
+ --------
208
+ If a dtype is not specified, pandas will infer the best dtype from the values.
209
+ See the description of `dtype` for the types pandas infers for.
210
+
211
+ >>> pd.array([1, 2])
212
+ <IntegerArray>
213
+ [1, 2]
214
+ Length: 2, dtype: Int64
215
+
216
+ >>> pd.array([1, 2, np.nan])
217
+ <IntegerArray>
218
+ [1, 2, <NA>]
219
+ Length: 3, dtype: Int64
220
+
221
+ >>> pd.array([1.1, 2.2])
222
+ <FloatingArray>
223
+ [1.1, 2.2]
224
+ Length: 2, dtype: Float64
225
+
226
+ >>> pd.array(["a", None, "c"])
227
+ <StringArray>
228
+ ['a', <NA>, 'c']
229
+ Length: 3, dtype: string
230
+
231
+ >>> with pd.option_context("string_storage", "pyarrow"):
232
+ ... arr = pd.array(["a", None, "c"])
233
+ ...
234
+ >>> arr
235
+ <ArrowStringArray>
236
+ ['a', <NA>, 'c']
237
+ Length: 3, dtype: string
238
+
239
+ >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
240
+ <PeriodArray>
241
+ ['2000-01-01', '2000-01-01']
242
+ Length: 2, dtype: period[D]
243
+
244
+ You can use the string alias for `dtype`
245
+
246
+ >>> pd.array(['a', 'b', 'a'], dtype='category')
247
+ ['a', 'b', 'a']
248
+ Categories (2, object): ['a', 'b']
249
+
250
+ Or specify the actual dtype
251
+
252
+ >>> pd.array(['a', 'b', 'a'],
253
+ ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
254
+ ['a', 'b', 'a']
255
+ Categories (3, object): ['a' < 'b' < 'c']
256
+
257
+ If pandas does not infer a dedicated extension type a
258
+ :class:`arrays.NumpyExtensionArray` is returned.
259
+
260
+ >>> pd.array([1 + 1j, 3 + 2j])
261
+ <NumpyExtensionArray>
262
+ [(1+1j), (3+2j)]
263
+ Length: 2, dtype: complex128
264
+
265
+ As mentioned in the "Notes" section, new extension types may be added
266
+ in the future (by pandas or 3rd party libraries), causing the return
267
+ value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the
268
+ `dtype` as a NumPy dtype if you need to ensure there's no future change in
269
+ behavior.
270
+
271
+ >>> pd.array([1, 2], dtype=np.dtype("int32"))
272
+ <NumpyExtensionArray>
273
+ [1, 2]
274
+ Length: 2, dtype: int32
275
+
276
+ `data` must be 1-dimensional. A ValueError is raised when the input
277
+ has the wrong dimensionality.
278
+
279
+ >>> pd.array(1)
280
+ Traceback (most recent call last):
281
+ ...
282
+ ValueError: Cannot pass scalar '1' to 'pandas.array'.
283
+ """
284
+ from pandas.core.arrays import (
285
+ BooleanArray,
286
+ DatetimeArray,
287
+ ExtensionArray,
288
+ FloatingArray,
289
+ IntegerArray,
290
+ IntervalArray,
291
+ NumpyExtensionArray,
292
+ PeriodArray,
293
+ TimedeltaArray,
294
+ )
295
+ from pandas.core.arrays.string_ import StringDtype
296
+
297
+ if lib.is_scalar(data):
298
+ msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
299
+ raise ValueError(msg)
300
+ elif isinstance(data, ABCDataFrame):
301
+ raise TypeError("Cannot pass DataFrame to 'pandas.array'")
302
+
303
+ if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
304
+ # Note: we exclude np.ndarray here, will do type inference on it
305
+ dtype = data.dtype
306
+
307
+ data = extract_array(data, extract_numpy=True)
308
+
309
+ # this returns None for not-found dtypes.
310
+ if dtype is not None:
311
+ dtype = pandas_dtype(dtype)
312
+
313
+ if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype):
314
+ # e.g. TimedeltaArray[s], avoid casting to NumpyExtensionArray
315
+ if copy:
316
+ return data.copy()
317
+ return data
318
+
319
+ if isinstance(dtype, ExtensionDtype):
320
+ cls = dtype.construct_array_type()
321
+ return cls._from_sequence(data, dtype=dtype, copy=copy)
322
+
323
+ if dtype is None:
324
+ inferred_dtype = lib.infer_dtype(data, skipna=True)
325
+ if inferred_dtype == "period":
326
+ period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data)
327
+ return PeriodArray._from_sequence(period_data, copy=copy)
328
+
329
+ elif inferred_dtype == "interval":
330
+ return IntervalArray(data, copy=copy)
331
+
332
+ elif inferred_dtype.startswith("datetime"):
333
+ # datetime, datetime64
334
+ try:
335
+ return DatetimeArray._from_sequence(data, copy=copy)
336
+ except ValueError:
337
+ # Mixture of timezones, fall back to NumpyExtensionArray
338
+ pass
339
+
340
+ elif inferred_dtype.startswith("timedelta"):
341
+ # timedelta, timedelta64
342
+ return TimedeltaArray._from_sequence(data, copy=copy)
343
+
344
+ elif inferred_dtype == "string":
345
+ # StringArray/ArrowStringArray depending on pd.options.mode.string_storage
346
+ dtype = StringDtype()
347
+ cls = dtype.construct_array_type()
348
+ return cls._from_sequence(data, dtype=dtype, copy=copy)
349
+
350
+ elif inferred_dtype == "integer":
351
+ return IntegerArray._from_sequence(data, copy=copy)
352
+ elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data):
353
+ return FloatingArray._from_sequence(data, copy=copy)
354
+ elif (
355
+ inferred_dtype in ("floating", "mixed-integer-float")
356
+ and getattr(data, "dtype", None) != np.float16
357
+ ):
358
+ # GH#44715 Exclude np.float16 bc FloatingArray does not support it;
359
+ # we will fall back to NumpyExtensionArray.
360
+ return FloatingArray._from_sequence(data, copy=copy)
361
+
362
+ elif inferred_dtype == "boolean":
363
+ return BooleanArray._from_sequence(data, dtype="boolean", copy=copy)
364
+
365
+ # Pandas overrides NumPy for
366
+ # 1. datetime64[ns,us,ms,s]
367
+ # 2. timedelta64[ns,us,ms,s]
368
+ # so that a DatetimeArray is returned.
369
+ if lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
370
+ return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
371
+ if lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
372
+ return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
373
+
374
+ elif lib.is_np_dtype(dtype, "mM"):
375
+ warnings.warn(
376
+ r"datetime64 and timedelta64 dtype resolutions other than "
377
+ r"'s', 'ms', 'us', and 'ns' are deprecated. "
378
+ r"In future releases passing unsupported resolutions will "
379
+ r"raise an exception.",
380
+ FutureWarning,
381
+ stacklevel=find_stack_level(),
382
+ )
383
+
384
+ return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy)
385
+
386
+
387
+ _typs = frozenset(
388
+ {
389
+ "index",
390
+ "rangeindex",
391
+ "multiindex",
392
+ "datetimeindex",
393
+ "timedeltaindex",
394
+ "periodindex",
395
+ "categoricalindex",
396
+ "intervalindex",
397
+ "series",
398
+ }
399
+ )
400
+
401
+
402
+ @overload
403
+ def extract_array(
404
+ obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ...
405
+ ) -> ArrayLike:
406
+ ...
407
+
408
+
409
+ @overload
410
+ def extract_array(
411
+ obj: T, extract_numpy: bool = ..., extract_range: bool = ...
412
+ ) -> T | ArrayLike:
413
+ ...
414
+
415
+
416
+ def extract_array(
417
+ obj: T, extract_numpy: bool = False, extract_range: bool = False
418
+ ) -> T | ArrayLike:
419
+ """
420
+ Extract the ndarray or ExtensionArray from a Series or Index.
421
+
422
+ For all other types, `obj` is just returned as is.
423
+
424
+ Parameters
425
+ ----------
426
+ obj : object
427
+ For Series / Index, the underlying ExtensionArray is unboxed.
428
+
429
+ extract_numpy : bool, default False
430
+ Whether to extract the ndarray from a NumpyExtensionArray.
431
+
432
+ extract_range : bool, default False
433
+ If we have a RangeIndex, return range._values if True
434
+ (which is a materialized integer ndarray), otherwise return unchanged.
435
+
436
+ Returns
437
+ -------
438
+ arr : object
439
+
440
+ Examples
441
+ --------
442
+ >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
443
+ ['a', 'b', 'c']
444
+ Categories (3, object): ['a', 'b', 'c']
445
+
446
+ Other objects like lists, arrays, and DataFrames are just passed through.
447
+
448
+ >>> extract_array([1, 2, 3])
449
+ [1, 2, 3]
450
+
451
+ For an ndarray-backed Series / Index the ndarray is returned.
452
+
453
+ >>> extract_array(pd.Series([1, 2, 3]))
454
+ array([1, 2, 3])
455
+
456
+ To extract all the way down to the ndarray, pass ``extract_numpy=True``.
457
+
458
+ >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
459
+ array([1, 2, 3])
460
+ """
461
+ typ = getattr(obj, "_typ", None)
462
+ if typ in _typs:
463
+ # i.e. isinstance(obj, (ABCIndex, ABCSeries))
464
+ if typ == "rangeindex":
465
+ if extract_range:
466
+ # error: "T" has no attribute "_values"
467
+ return obj._values # type: ignore[attr-defined]
468
+ return obj
469
+
470
+ # error: "T" has no attribute "_values"
471
+ return obj._values # type: ignore[attr-defined]
472
+
473
+ elif extract_numpy and typ == "npy_extension":
474
+ # i.e. isinstance(obj, ABCNumpyExtensionArray)
475
+ # error: "T" has no attribute "to_numpy"
476
+ return obj.to_numpy() # type: ignore[attr-defined]
477
+
478
+ return obj
479
+
480
+
481
+ def ensure_wrapped_if_datetimelike(arr):
482
+ """
483
+ Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray.
484
+ """
485
+ if isinstance(arr, np.ndarray):
486
+ if arr.dtype.kind == "M":
487
+ from pandas.core.arrays import DatetimeArray
488
+
489
+ dtype = get_supported_dtype(arr.dtype)
490
+ return DatetimeArray._from_sequence(arr, dtype=dtype)
491
+
492
+ elif arr.dtype.kind == "m":
493
+ from pandas.core.arrays import TimedeltaArray
494
+
495
+ dtype = get_supported_dtype(arr.dtype)
496
+ return TimedeltaArray._from_sequence(arr, dtype=dtype)
497
+
498
+ return arr
499
+
500
+
501
+ def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray:
502
+ """
503
+ Convert numpy MaskedArray to ensure mask is softened.
504
+ """
505
+ mask = ma.getmaskarray(data)
506
+ if mask.any():
507
+ dtype, fill_value = maybe_promote(data.dtype, np.nan)
508
+ dtype = cast(np.dtype, dtype)
509
+ data = ma.asarray(data.astype(dtype, copy=True))
510
+ data.soften_mask() # set hardmask False if it was True
511
+ data[mask] = fill_value
512
+ else:
513
+ data = data.copy()
514
+ return data
515
+
516
+
517
+ def sanitize_array(
518
+ data,
519
+ index: Index | None,
520
+ dtype: DtypeObj | None = None,
521
+ copy: bool = False,
522
+ *,
523
+ allow_2d: bool = False,
524
+ ) -> ArrayLike:
525
+ """
526
+ Sanitize input data to an ndarray or ExtensionArray, copy if specified,
527
+ coerce to the dtype if specified.
528
+
529
+ Parameters
530
+ ----------
531
+ data : Any
532
+ index : Index or None, default None
533
+ dtype : np.dtype, ExtensionDtype, or None, default None
534
+ copy : bool, default False
535
+ allow_2d : bool, default False
536
+ If False, raise if we have a 2D Arraylike.
537
+
538
+ Returns
539
+ -------
540
+ np.ndarray or ExtensionArray
541
+ """
542
+ original_dtype = dtype
543
+ if isinstance(data, ma.MaskedArray):
544
+ data = sanitize_masked_array(data)
545
+
546
+ if isinstance(dtype, NumpyEADtype):
547
+ # Avoid ending up with a NumpyExtensionArray
548
+ dtype = dtype.numpy_dtype
549
+
550
+ object_index = False
551
+ if isinstance(data, ABCIndex) and data.dtype == object and dtype is None:
552
+ object_index = True
553
+
554
+ # extract ndarray or ExtensionArray, ensure we have no NumpyExtensionArray
555
+ data = extract_array(data, extract_numpy=True, extract_range=True)
556
+
557
+ if isinstance(data, np.ndarray) and data.ndim == 0:
558
+ if dtype is None:
559
+ dtype = data.dtype
560
+ data = lib.item_from_zerodim(data)
561
+ elif isinstance(data, range):
562
+ # GH#16804
563
+ data = range_to_ndarray(data)
564
+ copy = False
565
+
566
+ if not is_list_like(data):
567
+ if index is None:
568
+ raise ValueError("index must be specified when data is not list-like")
569
+ if (
570
+ isinstance(data, str)
571
+ and using_pyarrow_string_dtype()
572
+ and original_dtype is None
573
+ ):
574
+ from pandas.core.arrays.string_ import StringDtype
575
+
576
+ dtype = StringDtype("pyarrow_numpy")
577
+ data = construct_1d_arraylike_from_scalar(data, len(index), dtype)
578
+
579
+ return data
580
+
581
+ elif isinstance(data, ABCExtensionArray):
582
+ # it is already ensured above this is not a NumpyExtensionArray
583
+ # Until GH#49309 is fixed this check needs to come before the
584
+ # ExtensionDtype check
585
+ if dtype is not None:
586
+ subarr = data.astype(dtype, copy=copy)
587
+ elif copy:
588
+ subarr = data.copy()
589
+ else:
590
+ subarr = data
591
+
592
+ elif isinstance(dtype, ExtensionDtype):
593
+ # create an extension array from its dtype
594
+ _sanitize_non_ordered(data)
595
+ cls = dtype.construct_array_type()
596
+ subarr = cls._from_sequence(data, dtype=dtype, copy=copy)
597
+
598
+ # GH#846
599
+ elif isinstance(data, np.ndarray):
600
+ if isinstance(data, np.matrix):
601
+ data = data.A
602
+
603
+ if dtype is None:
604
+ subarr = data
605
+ if data.dtype == object:
606
+ subarr = maybe_infer_to_datetimelike(data)
607
+ if (
608
+ object_index
609
+ and using_pyarrow_string_dtype()
610
+ and is_string_dtype(subarr)
611
+ ):
612
+ # Avoid inference when string option is set
613
+ subarr = data
614
+ elif data.dtype.kind == "U" and using_pyarrow_string_dtype():
615
+ from pandas.core.arrays.string_ import StringDtype
616
+
617
+ dtype = StringDtype(storage="pyarrow_numpy")
618
+ subarr = dtype.construct_array_type()._from_sequence(data, dtype=dtype)
619
+
620
+ if subarr is data and copy:
621
+ subarr = subarr.copy()
622
+
623
+ else:
624
+ # we will try to copy by-definition here
625
+ subarr = _try_cast(data, dtype, copy)
626
+
627
+ elif hasattr(data, "__array__"):
628
+ # e.g. dask array GH#38645
629
+ if not copy:
630
+ data = np.asarray(data)
631
+ else:
632
+ data = np.array(data, copy=copy)
633
+ return sanitize_array(
634
+ data,
635
+ index=index,
636
+ dtype=dtype,
637
+ copy=False,
638
+ allow_2d=allow_2d,
639
+ )
640
+
641
+ else:
642
+ _sanitize_non_ordered(data)
643
+ # materialize e.g. generators, convert e.g. tuples, abc.ValueView
644
+ data = list(data)
645
+
646
+ if len(data) == 0 and dtype is None:
647
+ # We default to float64, matching numpy
648
+ subarr = np.array([], dtype=np.float64)
649
+
650
+ elif dtype is not None:
651
+ subarr = _try_cast(data, dtype, copy)
652
+
653
+ else:
654
+ subarr = maybe_convert_platform(data)
655
+ if subarr.dtype == object:
656
+ subarr = cast(np.ndarray, subarr)
657
+ subarr = maybe_infer_to_datetimelike(subarr)
658
+
659
+ subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d)
660
+
661
+ if isinstance(subarr, np.ndarray):
662
+ # at this point we should have dtype be None or subarr.dtype == dtype
663
+ dtype = cast(np.dtype, dtype)
664
+ subarr = _sanitize_str_dtypes(subarr, data, dtype, copy)
665
+
666
+ return subarr
667
+
668
+
669
+ def range_to_ndarray(rng: range) -> np.ndarray:
670
+ """
671
+ Cast a range object to ndarray.
672
+ """
673
+ # GH#30171 perf avoid realizing range as a list in np.array
674
+ try:
675
+ arr = np.arange(rng.start, rng.stop, rng.step, dtype="int64")
676
+ except OverflowError:
677
+ # GH#30173 handling for ranges that overflow int64
678
+ if (rng.start >= 0 and rng.step > 0) or (rng.step < 0 <= rng.stop):
679
+ try:
680
+ arr = np.arange(rng.start, rng.stop, rng.step, dtype="uint64")
681
+ except OverflowError:
682
+ arr = construct_1d_object_array_from_listlike(list(rng))
683
+ else:
684
+ arr = construct_1d_object_array_from_listlike(list(rng))
685
+ return arr
686
+
687
+
688
+ def _sanitize_non_ordered(data) -> None:
689
+ """
690
+ Raise only for unordered sets, e.g., not for dict_keys
691
+ """
692
+ if isinstance(data, (set, frozenset)):
693
+ raise TypeError(f"'{type(data).__name__}' type is unordered")
694
+
695
+
696
+ def _sanitize_ndim(
697
+ result: ArrayLike,
698
+ data,
699
+ dtype: DtypeObj | None,
700
+ index: Index | None,
701
+ *,
702
+ allow_2d: bool = False,
703
+ ) -> ArrayLike:
704
+ """
705
+ Ensure we have a 1-dimensional result array.
706
+ """
707
+ if getattr(result, "ndim", 0) == 0:
708
+ raise ValueError("result should be arraylike with ndim > 0")
709
+
710
+ if result.ndim == 1:
711
+ # the result that we want
712
+ result = _maybe_repeat(result, index)
713
+
714
+ elif result.ndim > 1:
715
+ if isinstance(data, np.ndarray):
716
+ if allow_2d:
717
+ return result
718
+ raise ValueError(
719
+ f"Data must be 1-dimensional, got ndarray of shape {data.shape} instead"
720
+ )
721
+ if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype):
722
+ # i.e. NumpyEADtype("O")
723
+
724
+ result = com.asarray_tuplesafe(data, dtype=np.dtype("object"))
725
+ cls = dtype.construct_array_type()
726
+ result = cls._from_sequence(result, dtype=dtype)
727
+ else:
728
+ # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type
729
+ # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[str,
730
+ # dtype[Any], None]"
731
+ result = com.asarray_tuplesafe(data, dtype=dtype) # type: ignore[arg-type]
732
+ return result
733
+
734
+
735
+ def _sanitize_str_dtypes(
736
+ result: np.ndarray, data, dtype: np.dtype | None, copy: bool
737
+ ) -> np.ndarray:
738
+ """
739
+ Ensure we have a dtype that is supported by pandas.
740
+ """
741
+
742
+ # This is to prevent mixed-type Series getting all casted to
743
+ # NumPy string type, e.g. NaN --> '-1#IND'.
744
+ if issubclass(result.dtype.type, str):
745
+ # GH#16605
746
+ # If not empty convert the data to dtype
747
+ # GH#19853: If data is a scalar, result has already the result
748
+ if not lib.is_scalar(data):
749
+ if not np.all(isna(data)):
750
+ data = np.asarray(data, dtype=dtype)
751
+ if not copy:
752
+ result = np.asarray(data, dtype=object)
753
+ else:
754
+ result = np.array(data, dtype=object, copy=copy)
755
+ return result
756
+
757
+
758
+ def _maybe_repeat(arr: ArrayLike, index: Index | None) -> ArrayLike:
759
+ """
760
+ If we have a length-1 array and an index describing how long we expect
761
+ the result to be, repeat the array.
762
+ """
763
+ if index is not None:
764
+ if 1 == len(arr) != len(index):
765
+ arr = arr.repeat(len(index))
766
+ return arr
767
+
768
+
769
+ def _try_cast(
770
+ arr: list | np.ndarray,
771
+ dtype: np.dtype,
772
+ copy: bool,
773
+ ) -> ArrayLike:
774
+ """
775
+ Convert input to numpy ndarray and optionally cast to a given dtype.
776
+
777
+ Parameters
778
+ ----------
779
+ arr : ndarray or list
780
+ Excludes: ExtensionArray, Series, Index.
781
+ dtype : np.dtype
782
+ copy : bool
783
+ If False, don't copy the data if not needed.
784
+
785
+ Returns
786
+ -------
787
+ np.ndarray or ExtensionArray
788
+ """
789
+ is_ndarray = isinstance(arr, np.ndarray)
790
+
791
+ if dtype == object:
792
+ if not is_ndarray:
793
+ subarr = construct_1d_object_array_from_listlike(arr)
794
+ return subarr
795
+ return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy)
796
+
797
+ elif dtype.kind == "U":
798
+ # TODO: test cases with arr.dtype.kind in "mM"
799
+ if is_ndarray:
800
+ arr = cast(np.ndarray, arr)
801
+ shape = arr.shape
802
+ if arr.ndim > 1:
803
+ arr = arr.ravel()
804
+ else:
805
+ shape = (len(arr),)
806
+ return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(
807
+ shape
808
+ )
809
+
810
+ elif dtype.kind in "mM":
811
+ return maybe_cast_to_datetime(arr, dtype)
812
+
813
+ # GH#15832: Check if we are requesting a numeric dtype and
814
+ # that we can convert the data to the requested dtype.
815
+ elif dtype.kind in "iu":
816
+ # this will raise if we have e.g. floats
817
+
818
+ subarr = maybe_cast_to_integer_array(arr, dtype)
819
+ elif not copy:
820
+ subarr = np.asarray(arr, dtype=dtype)
821
+ else:
822
+ subarr = np.array(arr, dtype=dtype, copy=copy)
823
+
824
+ return subarr
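The helpers in this module are private to pandas, but their behaviour is observable through the public constructors. The following is an illustrative sketch only (not part of the vendored file; assumes a pandas 2.x install) of two paths through sanitize_array:

import pandas as pd

# range input is materialized via np.arange in range_to_ndarray,
# avoiding an intermediate Python list
s = pd.Series(range(3))
print(s.dtype)  # int64

# unordered containers are rejected by _sanitize_non_ordered
try:
    pd.Series({1, 2, 3})
except TypeError as err:
    print(err)  # "'set' type is unordered"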
venv/lib/python3.10/site-packages/pandas/core/flags.py ADDED
@@ -0,0 +1,117 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING
4
+ import weakref
5
+
6
+ if TYPE_CHECKING:
7
+ from pandas.core.generic import NDFrame
8
+
9
+
10
+ class Flags:
11
+ """
12
+ Flags that apply to pandas objects.
13
+
14
+ Parameters
15
+ ----------
16
+ obj : Series or DataFrame
17
+ The object these flags are associated with.
18
+ allows_duplicate_labels : bool, default True
19
+ Whether to allow duplicate labels in this object. By default,
20
+ duplicate labels are permitted. Setting this to ``False`` will
21
+ cause an :class:`errors.DuplicateLabelError` to be raised when
22
+ `index` (or columns for DataFrame) is not unique, or any
23
+ subsequent operation introduces duplicates.
24
+ See :ref:`duplicates.disallow` for more.
25
+
26
+ .. warning::
27
+
28
+ This is an experimental feature. Currently, many methods fail to
29
+ propagate the ``allows_duplicate_labels`` value. In future versions
30
+ it is expected that every method taking or returning one or more
31
+ DataFrame or Series objects will propagate ``allows_duplicate_labels``.
32
+
33
+ Examples
34
+ --------
35
+ Attributes can be set in two ways:
36
+
37
+ >>> df = pd.DataFrame()
38
+ >>> df.flags
39
+ <Flags(allows_duplicate_labels=True)>
40
+ >>> df.flags.allows_duplicate_labels = False
41
+ >>> df.flags
42
+ <Flags(allows_duplicate_labels=False)>
43
+
44
+ >>> df.flags['allows_duplicate_labels'] = True
45
+ >>> df.flags
46
+ <Flags(allows_duplicate_labels=True)>
47
+ """
48
+
49
+ _keys: set[str] = {"allows_duplicate_labels"}
50
+
51
+ def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None:
52
+ self._allows_duplicate_labels = allows_duplicate_labels
53
+ self._obj = weakref.ref(obj)
54
+
55
+ @property
56
+ def allows_duplicate_labels(self) -> bool:
57
+ """
58
+ Whether this object allows duplicate labels.
59
+
60
+ Setting ``allows_duplicate_labels=False`` ensures that the
61
+ index (and columns of a DataFrame) are unique. Most methods
62
+ that accept and return a Series or DataFrame will propagate
63
+ the value of ``allows_duplicate_labels``.
64
+
65
+ See :ref:`duplicates` for more.
66
+
67
+ See Also
68
+ --------
69
+ DataFrame.attrs : Set global metadata on this object.
70
+ DataFrame.set_flags : Set global flags on this object.
71
+
72
+ Examples
73
+ --------
74
+ >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
75
+ >>> df.flags.allows_duplicate_labels
76
+ True
77
+ >>> df.flags.allows_duplicate_labels = False
78
+ Traceback (most recent call last):
79
+ ...
80
+ pandas.errors.DuplicateLabelError: Index has duplicates.
81
+ positions
82
+ label
83
+ a [0, 1]
84
+ """
85
+ return self._allows_duplicate_labels
86
+
87
+ @allows_duplicate_labels.setter
88
+ def allows_duplicate_labels(self, value: bool) -> None:
89
+ value = bool(value)
90
+ obj = self._obj()
91
+ if obj is None:
92
+ raise ValueError("This flag's object has been deleted.")
93
+
94
+ if not value:
95
+ for ax in obj.axes:
96
+ ax._maybe_check_unique()
97
+
98
+ self._allows_duplicate_labels = value
99
+
100
+ def __getitem__(self, key: str):
101
+ if key not in self._keys:
102
+ raise KeyError(key)
103
+
104
+ return getattr(self, key)
105
+
106
+ def __setitem__(self, key: str, value) -> None:
107
+ if key not in self._keys:
108
+ raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
109
+ setattr(self, key, value)
110
+
111
+ def __repr__(self) -> str:
112
+ return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"
113
+
114
+ def __eq__(self, other) -> bool:
115
+ if isinstance(other, type(self)):
116
+ return self.allows_duplicate_labels == other.allows_duplicate_labels
117
+ return False
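For context, the Flags container above is normally reached through the public ``DataFrame.flags`` and ``DataFrame.set_flags`` accessors. A short illustrative sketch (not part of the vendored file):

import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
print(df.flags)  # <Flags(allows_duplicate_labels=True)>

# set_flags returns a new object carrying the requested flag value
df2 = df.set_flags(allows_duplicate_labels=False)
print(df2.flags["allows_duplicate_labels"])  # False

# __eq__ compares only the flag values
print(df.flags == df2.flags)  # False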
venv/lib/python3.10/site-packages/pandas/core/frame.py ADDED
The diff for this file is too large to render.

venv/lib/python3.10/site-packages/pandas/core/generic.py ADDED
The diff for this file is too large to render.

venv/lib/python3.10/site-packages/pandas/core/indexing.py ADDED
The diff for this file is too large to render.

venv/lib/python3.10/site-packages/pandas/core/interchange/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes).

venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/buffer.cpython-310.pyc ADDED
Binary file (3.62 kB).

venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/column.cpython-310.pyc ADDED
Binary file (12.1 kB).

venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc ADDED
Binary file (4.69 kB).

venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc ADDED
Binary file (17.9 kB).

venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc ADDED
Binary file (12.2 kB).

venv/lib/python3.10/site-packages/pandas/core/interchange/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.55 kB).

venv/lib/python3.10/site-packages/pandas/core/interchange/buffer.py ADDED
@@ -0,0 +1,136 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ )
7
+
8
+ from pandas.core.interchange.dataframe_protocol import (
9
+ Buffer,
10
+ DlpackDeviceType,
11
+ )
12
+
13
+ if TYPE_CHECKING:
14
+ import numpy as np
15
+ import pyarrow as pa
16
+
17
+
18
+ class PandasBuffer(Buffer):
19
+ """
20
+ Data in the buffer is guaranteed to be contiguous in memory.
21
+ """
22
+
23
+ def __init__(self, x: np.ndarray, allow_copy: bool = True) -> None:
24
+ """
25
+ Handle only regular columns (= numpy arrays) for now.
26
+ """
27
+ if x.strides[0] and not x.strides == (x.dtype.itemsize,):
28
+ # The protocol does not support strided buffers, so a copy is
29
+ # necessary. If that's not allowed, we need to raise an exception.
30
+ if allow_copy:
31
+ x = x.copy()
32
+ else:
33
+ raise RuntimeError(
34
+ "Exports cannot be zero-copy in the case "
35
+ "of a non-contiguous buffer"
36
+ )
37
+
38
+ # Store the numpy array in which the data resides as a private
39
+ # attribute, so we can use it to retrieve the public attributes
40
+ self._x = x
41
+
42
+ @property
43
+ def bufsize(self) -> int:
44
+ """
45
+ Buffer size in bytes.
46
+ """
47
+ return self._x.size * self._x.dtype.itemsize
48
+
49
+ @property
50
+ def ptr(self) -> int:
51
+ """
52
+ Pointer to start of the buffer as an integer.
53
+ """
54
+ return self._x.__array_interface__["data"][0]
55
+
56
+ def __dlpack__(self) -> Any:
57
+ """
58
+ Represent this structure as DLPack interface.
59
+ """
60
+ return self._x.__dlpack__()
61
+
62
+ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
63
+ """
64
+ Device type and device ID for where the data in the buffer resides.
65
+ """
66
+ return (DlpackDeviceType.CPU, None)
67
+
68
+ def __repr__(self) -> str:
69
+ return (
70
+ "PandasBuffer("
71
+ + str(
72
+ {
73
+ "bufsize": self.bufsize,
74
+ "ptr": self.ptr,
75
+ "device": self.__dlpack_device__()[0].name,
76
+ }
77
+ )
78
+ + ")"
79
+ )
80
+
81
+
82
+ class PandasBufferPyarrow(Buffer):
83
+ """
84
+ Data in the buffer is guaranteed to be contiguous in memory.
85
+ """
86
+
87
+ def __init__(
88
+ self,
89
+ buffer: pa.Buffer,
90
+ *,
91
+ length: int,
92
+ ) -> None:
93
+ """
94
+ Handle pyarrow chunked arrays.
95
+ """
96
+ self._buffer = buffer
97
+ self._length = length
98
+
99
+ @property
100
+ def bufsize(self) -> int:
101
+ """
102
+ Buffer size in bytes.
103
+ """
104
+ return self._buffer.size
105
+
106
+ @property
107
+ def ptr(self) -> int:
108
+ """
109
+ Pointer to start of the buffer as an integer.
110
+ """
111
+ return self._buffer.address
112
+
113
+ def __dlpack__(self) -> Any:
114
+ """
115
+ Represent this structure as DLPack interface.
116
+ """
117
+ raise NotImplementedError()
118
+
119
+ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
120
+ """
121
+ Device type and device ID for where the data in the buffer resides.
122
+ """
123
+ return (DlpackDeviceType.CPU, None)
124
+
125
+ def __repr__(self) -> str:
126
+ return (
127
+ "PandasBuffer[pyarrow]("
128
+ + str(
129
+ {
130
+ "bufsize": self.bufsize,
131
+ "ptr": self.ptr,
132
+ "device": "CPU",
133
+ }
134
+ )
135
+ + ")"
136
+ )
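PandasBuffer and PandasBufferPyarrow are private classes; the sketch below (illustrative only, not part of the vendored file) constructs a PandasBuffer directly to show how ``bufsize`` and ``ptr`` map onto the backing NumPy array:

import numpy as np
from pandas.core.interchange.buffer import PandasBuffer

arr = np.arange(4, dtype="int64")
buf = PandasBuffer(arr)

print(buf.bufsize)  # 32: 4 elements * 8 bytes per element
print(buf.ptr == arr.__array_interface__["data"][0])  # True, same memory
print(buf.__dlpack_device__())  # (<DlpackDeviceType.CPU: 1>, None)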
venv/lib/python3.10/site-packages/pandas/core/interchange/column.py ADDED
@@ -0,0 +1,461 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ )
7
+
8
+ import numpy as np
9
+
10
+ from pandas._libs.lib import infer_dtype
11
+ from pandas._libs.tslibs import iNaT
12
+ from pandas.errors import NoBufferPresent
13
+ from pandas.util._decorators import cache_readonly
14
+
15
+ from pandas.core.dtypes.dtypes import BaseMaskedDtype
16
+
17
+ import pandas as pd
18
+ from pandas import (
19
+ ArrowDtype,
20
+ DatetimeTZDtype,
21
+ )
22
+ from pandas.api.types import is_string_dtype
23
+ from pandas.core.interchange.buffer import (
24
+ PandasBuffer,
25
+ PandasBufferPyarrow,
26
+ )
27
+ from pandas.core.interchange.dataframe_protocol import (
28
+ Column,
29
+ ColumnBuffers,
30
+ ColumnNullType,
31
+ DtypeKind,
32
+ )
33
+ from pandas.core.interchange.utils import (
34
+ ArrowCTypes,
35
+ Endianness,
36
+ dtype_to_arrow_c_fmt,
37
+ )
38
+
39
+ if TYPE_CHECKING:
40
+ from pandas.core.interchange.dataframe_protocol import Buffer
41
+
42
+ _NP_KINDS = {
43
+ "i": DtypeKind.INT,
44
+ "u": DtypeKind.UINT,
45
+ "f": DtypeKind.FLOAT,
46
+ "b": DtypeKind.BOOL,
47
+ "U": DtypeKind.STRING,
48
+ "M": DtypeKind.DATETIME,
49
+ "m": DtypeKind.DATETIME,
50
+ }
51
+
52
+ _NULL_DESCRIPTION = {
53
+ DtypeKind.FLOAT: (ColumnNullType.USE_NAN, None),
54
+ DtypeKind.DATETIME: (ColumnNullType.USE_SENTINEL, iNaT),
55
+ DtypeKind.INT: (ColumnNullType.NON_NULLABLE, None),
56
+ DtypeKind.UINT: (ColumnNullType.NON_NULLABLE, None),
57
+ DtypeKind.BOOL: (ColumnNullType.NON_NULLABLE, None),
58
+ # Null values for categoricals are stored as `-1` sentinel values
59
+ # in the category data (e.g., `col.values.codes` is an int8 np.ndarray)
60
+ DtypeKind.CATEGORICAL: (ColumnNullType.USE_SENTINEL, -1),
61
+ # follow Arrow in using 1 as valid value and 0 for missing/null value
62
+ DtypeKind.STRING: (ColumnNullType.USE_BYTEMASK, 0),
63
+ }
64
+
65
+ _NO_VALIDITY_BUFFER = {
66
+ ColumnNullType.NON_NULLABLE: "This column is non-nullable",
67
+ ColumnNullType.USE_NAN: "This column uses NaN as null",
68
+ ColumnNullType.USE_SENTINEL: "This column uses a sentinel value",
69
+ }
70
+
71
+
72
+ class PandasColumn(Column):
73
+ """
74
+ A column object, with only the methods and properties required by the
75
+ interchange protocol defined.
76
+ A column can contain one or more chunks. Each chunk can contain up to three
77
+ buffers - a data buffer, a mask buffer (depending on null representation),
78
+ and an offsets buffer (if variable-size binary; e.g., variable-length
79
+ strings).
80
+ Note: this Column object can only be produced by ``__dataframe__``, so
81
+ doesn't need its own version or ``__column__`` protocol.
82
+ """
83
+
84
+ def __init__(self, column: pd.Series, allow_copy: bool = True) -> None:
85
+ """
86
+ Note: doesn't deal with extension arrays yet, just assume a regular
87
+ Series/ndarray for now.
88
+ """
89
+ if isinstance(column, pd.DataFrame):
90
+ raise TypeError(
91
+ "Expected a Series, got a DataFrame. This likely happened "
92
+ "because you called __dataframe__ on a DataFrame which, "
93
+ "after converting column names to string, resulted in duplicated "
94
+ f"names: {column.columns}. Please rename these columns before "
95
+ "using the interchange protocol."
96
+ )
97
+ if not isinstance(column, pd.Series):
98
+ raise NotImplementedError(f"Columns of type {type(column)} not handled yet")
99
+
100
+ # Store the column as a private attribute
101
+ self._col = column
102
+ self._allow_copy = allow_copy
103
+
104
+ def size(self) -> int:
105
+ """
106
+ Size of the column, in elements.
107
+ """
108
+ return self._col.size
109
+
110
+ @property
111
+ def offset(self) -> int:
112
+ """
113
+ Offset of first element. Always zero.
114
+ """
115
+ # TODO: chunks are implemented now, probably this should return something
116
+ return 0
117
+
118
+ @cache_readonly
119
+ def dtype(self) -> tuple[DtypeKind, int, str, str]:
120
+ dtype = self._col.dtype
121
+
122
+ if isinstance(dtype, pd.CategoricalDtype):
123
+ codes = self._col.values.codes
124
+ (
125
+ _,
126
+ bitwidth,
127
+ c_arrow_dtype_f_str,
128
+ _,
129
+ ) = self._dtype_from_pandasdtype(codes.dtype)
130
+ return (
131
+ DtypeKind.CATEGORICAL,
132
+ bitwidth,
133
+ c_arrow_dtype_f_str,
134
+ Endianness.NATIVE,
135
+ )
136
+ elif is_string_dtype(dtype):
137
+ if infer_dtype(self._col) in ("string", "empty"):
138
+ return (
139
+ DtypeKind.STRING,
140
+ 8,
141
+ dtype_to_arrow_c_fmt(dtype),
142
+ Endianness.NATIVE,
143
+ )
144
+ raise NotImplementedError("Non-string object dtypes are not supported yet")
145
+ else:
146
+ return self._dtype_from_pandasdtype(dtype)
147
+
148
+ def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]:
149
+ """
150
+ See `self.dtype` for details.
151
+ """
152
+ # Note: 'c' (complex) not handled yet (not in array spec v1).
153
+ # 'b', 'B' (bytes), 'S', 'a', (old-style string) 'V' (void) not handled
154
+ # datetime and timedelta both map to datetime (is timedelta handled?)
155
+
156
+ kind = _NP_KINDS.get(dtype.kind, None)
157
+ if kind is None:
158
+ # Not a NumPy dtype. Check if it's a categorical maybe
159
+ raise ValueError(f"Data type {dtype} not supported by interchange protocol")
160
+ if isinstance(dtype, ArrowDtype):
161
+ byteorder = dtype.numpy_dtype.byteorder
162
+ elif isinstance(dtype, DatetimeTZDtype):
163
+ byteorder = dtype.base.byteorder # type: ignore[union-attr]
164
+ elif isinstance(dtype, BaseMaskedDtype):
165
+ byteorder = dtype.numpy_dtype.byteorder
166
+ else:
167
+ byteorder = dtype.byteorder
168
+
169
+ if dtype == "bool[pyarrow]":
170
+ # return early to avoid the `* 8` below, as this is a bitmask
171
+ # rather than a bytemask
172
+ return (
173
+ kind,
174
+ dtype.itemsize, # pyright: ignore[reportGeneralTypeIssues]
175
+ ArrowCTypes.BOOL,
176
+ byteorder,
177
+ )
178
+
179
+ return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder
180
+
181
+ @property
182
+ def describe_categorical(self):
183
+ """
184
+ If the dtype is categorical, there are two options:
185
+ - There are only values in the data buffer.
186
+ - There is a separate non-categorical Column encoding for categorical values.
187
+
188
+ Raises TypeError if the dtype is not categorical
189
+
190
+ Content of returned dict:
191
+ - "is_ordered" : bool, whether the ordering of dictionary indices is
192
+ semantically meaningful.
193
+ - "is_dictionary" : bool, whether a dictionary-style mapping of
194
+ categorical values to other objects exists
195
+ - "categories" : Column representing the (implicit) mapping of indices to
196
+ category values (e.g. an array of cat1, cat2, ...).
197
+ None if not a dictionary-style categorical.
198
+ """
199
+ if not self.dtype[0] == DtypeKind.CATEGORICAL:
200
+ raise TypeError(
201
+ "describe_categorical only works on a column with categorical dtype!"
202
+ )
203
+
204
+ return {
205
+ "is_ordered": self._col.cat.ordered,
206
+ "is_dictionary": True,
207
+ "categories": PandasColumn(pd.Series(self._col.cat.categories)),
208
+ }
209
+
210
+ @property
211
+ def describe_null(self):
212
+ if isinstance(self._col.dtype, BaseMaskedDtype):
213
+ column_null_dtype = ColumnNullType.USE_BYTEMASK
214
+ null_value = 1
215
+ return column_null_dtype, null_value
216
+ if isinstance(self._col.dtype, ArrowDtype):
217
+ # We already rechunk (if necessary / allowed) upon initialization, so this
218
+ # is already single-chunk by the time we get here.
219
+ if self._col.array._pa_array.chunks[0].buffers()[0] is None: # type: ignore[attr-defined]
220
+ return ColumnNullType.NON_NULLABLE, None
221
+ return ColumnNullType.USE_BITMASK, 0
222
+ kind = self.dtype[0]
223
+ try:
224
+ null, value = _NULL_DESCRIPTION[kind]
225
+ except KeyError:
226
+ raise NotImplementedError(f"Data type {kind} not yet supported")
227
+
228
+ return null, value
229
+
230
+ @cache_readonly
231
+ def null_count(self) -> int:
232
+ """
233
+ Number of null elements. Should always be known.
234
+ """
235
+ return self._col.isna().sum().item()
236
+
237
+ @property
238
+ def metadata(self) -> dict[str, pd.Index]:
239
+ """
240
+ Store specific metadata of the column.
241
+ """
242
+ return {"pandas.index": self._col.index}
243
+
244
+ def num_chunks(self) -> int:
245
+ """
246
+ Return the number of chunks the column consists of.
247
+ """
248
+ return 1
249
+
250
+ def get_chunks(self, n_chunks: int | None = None):
251
+ """
252
+ Return an iterator yielding the chunks.
253
+ See `DataFrame.get_chunks` for details on ``n_chunks``.
254
+ """
255
+ if n_chunks and n_chunks > 1:
256
+ size = len(self._col)
257
+ step = size // n_chunks
258
+ if size % n_chunks != 0:
259
+ step += 1
260
+ for start in range(0, step * n_chunks, step):
261
+ yield PandasColumn(
262
+ self._col.iloc[start : start + step], self._allow_copy
263
+ )
264
+ else:
265
+ yield self
266
+
267
+ def get_buffers(self) -> ColumnBuffers:
268
+ """
269
+ Return a dictionary containing the underlying buffers.
270
+ The returned dictionary has the following contents:
271
+ - "data": a two-element tuple whose first element is a buffer
272
+ containing the data and whose second element is the data
273
+ buffer's associated dtype.
274
+ - "validity": a two-element tuple whose first element is a buffer
275
+ containing mask values indicating missing data and
276
+ whose second element is the mask value buffer's
277
+ associated dtype. None if the null representation is
278
+ not a bit or byte mask.
279
+ - "offsets": a two-element tuple whose first element is a buffer
280
+ containing the offset values for variable-size binary
281
+ data (e.g., variable-length strings) and whose second
282
+ element is the offsets buffer's associated dtype. None
283
+ if the data buffer does not have an associated offsets
284
+ buffer.
285
+ """
286
+ buffers: ColumnBuffers = {
287
+ "data": self._get_data_buffer(),
288
+ "validity": None,
289
+ "offsets": None,
290
+ }
291
+
292
+ try:
293
+ buffers["validity"] = self._get_validity_buffer()
294
+ except NoBufferPresent:
295
+ pass
296
+
297
+ try:
298
+ buffers["offsets"] = self._get_offsets_buffer()
299
+ except NoBufferPresent:
300
+ pass
301
+
302
+ return buffers
303
+
304
+ def _get_data_buffer(
305
+ self,
306
+ ) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]:
307
+ """
308
+ Return the buffer containing the data and the buffer's associated dtype.
309
+ """
310
+ buffer: Buffer
311
+ if self.dtype[0] in (
312
+ DtypeKind.INT,
313
+ DtypeKind.UINT,
314
+ DtypeKind.FLOAT,
315
+ DtypeKind.BOOL,
316
+ DtypeKind.DATETIME,
317
+ ):
318
+ # self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make
319
+ # it longer than 4 characters
320
+ dtype = self.dtype
321
+ if self.dtype[0] == DtypeKind.DATETIME and len(self.dtype[2]) > 4:
322
+ np_arr = self._col.dt.tz_convert(None).to_numpy()
323
+ else:
324
+ arr = self._col.array
325
+ if isinstance(self._col.dtype, BaseMaskedDtype):
326
+ np_arr = arr._data # type: ignore[attr-defined]
327
+ elif isinstance(self._col.dtype, ArrowDtype):
328
+ # We already rechunk (if necessary / allowed) upon initialization,
329
+ # so this is already single-chunk by the time we get here.
330
+ arr = arr._pa_array.chunks[0] # type: ignore[attr-defined]
331
+ buffer = PandasBufferPyarrow(
332
+ arr.buffers()[1], # type: ignore[attr-defined]
333
+ length=len(arr),
334
+ )
335
+ return buffer, dtype
336
+ else:
337
+ np_arr = arr._ndarray # type: ignore[attr-defined]
338
+ buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy)
339
+ elif self.dtype[0] == DtypeKind.CATEGORICAL:
340
+ codes = self._col.values._codes
341
+ buffer = PandasBuffer(codes, allow_copy=self._allow_copy)
342
+ dtype = self._dtype_from_pandasdtype(codes.dtype)
343
+ elif self.dtype[0] == DtypeKind.STRING:
344
+ # Marshal the strings from a NumPy object array into a byte array
345
+ buf = self._col.to_numpy()
346
+ b = bytearray()
347
+
348
+ # TODO: this for-loop is slow; can be implemented in Cython/C/C++ later
349
+ for obj in buf:
350
+ if isinstance(obj, str):
351
+ b.extend(obj.encode(encoding="utf-8"))
352
+
353
+ # Convert the byte array to a Pandas "buffer" using
354
+ # a NumPy array as the backing store
355
+ buffer = PandasBuffer(np.frombuffer(b, dtype="uint8"))
356
+
357
+ # Define the dtype for the returned buffer
358
+ # TODO: this will need correcting
359
+ # https://github.com/pandas-dev/pandas/issues/54781
360
+ dtype = self.dtype
361
+ else:
362
+ raise NotImplementedError(f"Data type {self._col.dtype} not handled yet")
363
+
364
+ return buffer, dtype
365
+
366
+ def _get_validity_buffer(self) -> tuple[Buffer, Any] | None:
367
+ """
368
+ Return the buffer containing the mask values indicating missing data and
369
+ the buffer's associated dtype.
370
+ Raises NoBufferPresent if null representation is not a bit or byte mask.
371
+ """
372
+ null, invalid = self.describe_null
373
+ buffer: Buffer
374
+ if isinstance(self._col.dtype, ArrowDtype):
375
+ # We already rechunk (if necessary / allowed) upon initialization, so this
376
+ # is already single-chunk by the time we get here.
377
+ arr = self._col.array._pa_array.chunks[0] # type: ignore[attr-defined]
378
+ dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE)
379
+ if arr.buffers()[0] is None:
380
+ return None
381
+ buffer = PandasBufferPyarrow(
382
+ arr.buffers()[0],
383
+ length=len(arr),
384
+ )
385
+ return buffer, dtype
386
+
387
+ if isinstance(self._col.dtype, BaseMaskedDtype):
388
+ mask = self._col.array._mask # type: ignore[attr-defined]
389
+ buffer = PandasBuffer(mask)
390
+ dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)
391
+ return buffer, dtype
392
+
393
+ if self.dtype[0] == DtypeKind.STRING:
394
+ # For now, use byte array as the mask.
395
+ # TODO: maybe store as bit array to save space?..
396
+ buf = self._col.to_numpy()
397
+
398
+ # Determine the encoding for valid values
399
+ valid = invalid == 0
400
+ invalid = not valid
401
+
402
+ mask = np.zeros(shape=(len(buf),), dtype=np.bool_)
403
+ for i, obj in enumerate(buf):
404
+ mask[i] = valid if isinstance(obj, str) else invalid
405
+
406
+ # Convert the mask array to a Pandas "buffer" using
407
+ # a NumPy array as the backing store
408
+ buffer = PandasBuffer(mask)
409
+
410
+ # Define the dtype of the returned buffer
411
+ dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)
412
+
413
+ return buffer, dtype
414
+
415
+ try:
416
+ msg = f"{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask"
417
+ except KeyError:
418
+ # TODO: implement for other bit/byte masks?
419
+ raise NotImplementedError("See self.describe_null")
420
+
421
+ raise NoBufferPresent(msg)
422
+
423
+ def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]:
424
+ """
425
+ Return the buffer containing the offset values for variable-size binary
426
+ data (e.g., variable-length strings) and the buffer's associated dtype.
427
+ Raises NoBufferPresent if the data buffer does not have an associated
428
+ offsets buffer.
429
+ """
430
+ if self.dtype[0] == DtypeKind.STRING:
431
+ # For each string, we need to manually determine the next offset
432
+ values = self._col.to_numpy()
433
+ ptr = 0
434
+ offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)
435
+ for i, v in enumerate(values):
436
+ # For missing values (in this case, `np.nan` values)
437
+ # we don't increment the pointer
438
+ if isinstance(v, str):
439
+ b = v.encode(encoding="utf-8")
440
+ ptr += len(b)
441
+
442
+ offsets[i + 1] = ptr
443
+
444
+ # Convert the offsets to a Pandas "buffer" using
445
+ # the NumPy array as the backing store
446
+ buffer = PandasBuffer(offsets)
447
+
448
+ # Assemble the buffer dtype info
449
+ dtype = (
450
+ DtypeKind.INT,
451
+ 64,
452
+ ArrowCTypes.INT64,
453
+ Endianness.NATIVE,
454
+ ) # note: currently only support native endianness
455
+ else:
456
+ raise NoBufferPresent(
457
+ "This column has a fixed-length dtype so "
458
+ "it does not have an offsets buffer"
459
+ )
460
+
461
+ return buffer, dtype
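PandasColumn is reached through ``DataFrame.__dataframe__`` rather than constructed directly. An illustrative sketch (not part of the vendored file) of the null and buffer queries defined above:

import pandas as pd

df = pd.DataFrame({"x": [1.0, None, 3.0]})
col = df.__dataframe__().get_column(0)

print(col.dtype[0])       # DtypeKind.FLOAT (kind part of the dtype tuple)
print(col.null_count)     # 1
print(col.describe_null)  # (ColumnNullType.USE_NAN, None) for float columns

# float columns use NaN as the null representation, so no validity buffer
print(col.get_buffers()["validity"])  # None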
venv/lib/python3.10/site-packages/pandas/core/interchange/dataframe.py ADDED
@@ -0,0 +1,113 @@
1
+ from __future__ import annotations
2
+
3
+ from collections import abc
4
+ from typing import TYPE_CHECKING
5
+
6
+ from pandas.core.interchange.column import PandasColumn
7
+ from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg
8
+ from pandas.core.interchange.utils import maybe_rechunk
9
+
10
+ if TYPE_CHECKING:
11
+ from collections.abc import (
12
+ Iterable,
13
+ Sequence,
14
+ )
15
+
16
+ from pandas import (
17
+ DataFrame,
18
+ Index,
19
+ )
20
+
21
+
22
+ class PandasDataFrameXchg(DataFrameXchg):
23
+ """
24
+ A data frame class, with only the methods required by the interchange
25
+ protocol defined.
26
+ Instances of this (private) class are returned from
27
+ ``pd.DataFrame.__dataframe__`` as objects with the methods and
28
+ attributes defined on this class.
29
+ """
30
+
31
+ def __init__(self, df: DataFrame, allow_copy: bool = True) -> None:
32
+ """
33
+ Constructor - an instance of this (private) class is returned from
34
+ `pd.DataFrame.__dataframe__`.
35
+ """
36
+ self._df = df.rename(columns=str, copy=False)
37
+ self._allow_copy = allow_copy
38
+ for i, _col in enumerate(self._df.columns):
39
+ rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy)
40
+ if rechunked is not None:
41
+ self._df.isetitem(i, rechunked)
42
+
43
+ def __dataframe__(
44
+ self, nan_as_null: bool = False, allow_copy: bool = True
45
+ ) -> PandasDataFrameXchg:
46
+ # `nan_as_null` can be removed here once it's removed from
47
+ # Dataframe.__dataframe__
48
+ return PandasDataFrameXchg(self._df, allow_copy)
49
+
50
+ @property
51
+ def metadata(self) -> dict[str, Index]:
52
+ # `index` isn't a regular column, and the protocol doesn't support row
53
+ # labels - so we export it as Pandas-specific metadata here.
54
+ return {"pandas.index": self._df.index}
55
+
56
+ def num_columns(self) -> int:
57
+ return len(self._df.columns)
58
+
59
+ def num_rows(self) -> int:
60
+ return len(self._df)
61
+
62
+ def num_chunks(self) -> int:
63
+ return 1
64
+
65
+ def column_names(self) -> Index:
66
+ return self._df.columns
67
+
68
+ def get_column(self, i: int) -> PandasColumn:
69
+ return PandasColumn(self._df.iloc[:, i], allow_copy=self._allow_copy)
70
+
71
+ def get_column_by_name(self, name: str) -> PandasColumn:
72
+ return PandasColumn(self._df[name], allow_copy=self._allow_copy)
73
+
74
+ def get_columns(self) -> list[PandasColumn]:
75
+ return [
76
+ PandasColumn(self._df[name], allow_copy=self._allow_copy)
77
+ for name in self._df.columns
78
+ ]
79
+
80
+ def select_columns(self, indices: Sequence[int]) -> PandasDataFrameXchg:
81
+ if not isinstance(indices, abc.Sequence):
82
+ raise ValueError("`indices` is not a sequence")
83
+ if not isinstance(indices, list):
84
+ indices = list(indices)
85
+
86
+ return PandasDataFrameXchg(
87
+ self._df.iloc[:, indices], allow_copy=self._allow_copy
88
+ )
89
+
90
+ def select_columns_by_name(self, names: list[str]) -> PandasDataFrameXchg: # type: ignore[override]
91
+ if not isinstance(names, abc.Sequence):
92
+ raise ValueError("`names` is not a sequence")
93
+ if not isinstance(names, list):
94
+ names = list(names)
95
+
96
+ return PandasDataFrameXchg(self._df.loc[:, names], allow_copy=self._allow_copy)
97
+
98
+ def get_chunks(self, n_chunks: int | None = None) -> Iterable[PandasDataFrameXchg]:
99
+ """
100
+ Return an iterator yielding the chunks.
101
+ """
102
+ if n_chunks and n_chunks > 1:
103
+ size = len(self._df)
104
+ step = size // n_chunks
105
+ if size % n_chunks != 0:
106
+ step += 1
107
+ for start in range(0, step * n_chunks, step):
108
+ yield PandasDataFrameXchg(
109
+ self._df.iloc[start : start + step, :],
110
+ allow_copy=self._allow_copy,
111
+ )
112
+ else:
113
+ yield self
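An illustrative sketch (not part of the vendored file) of the chunking behaviour implemented in ``get_chunks`` above:

import pandas as pd

df = pd.DataFrame({"a": range(10), "b": list("abcdefghij")})
xchg = df.__dataframe__()

print(xchg.num_rows(), xchg.num_columns(), xchg.num_chunks())  # 10 2 1

# requesting 4 chunks uses a step of ceil(10 / 4) == 3 rows per chunk
print([chunk.num_rows() for chunk in xchg.get_chunks(4)])  # [3, 3, 3, 1]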
venv/lib/python3.10/site-packages/pandas/core/interchange/dataframe_protocol.py ADDED
@@ -0,0 +1,465 @@
1
+ """
2
+ A verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ from abc import (
8
+ ABC,
9
+ abstractmethod,
10
+ )
11
+ import enum
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Any,
15
+ TypedDict,
16
+ )
17
+
18
+ if TYPE_CHECKING:
19
+ from collections.abc import (
20
+ Iterable,
21
+ Sequence,
22
+ )
23
+
24
+
25
+ class DlpackDeviceType(enum.IntEnum):
26
+ """Integer enum for device type codes matching DLPack."""
27
+
28
+ CPU = 1
29
+ CUDA = 2
30
+ CPU_PINNED = 3
31
+ OPENCL = 4
32
+ VULKAN = 7
33
+ METAL = 8
34
+ VPI = 9
35
+ ROCM = 10
36
+
37
+
38
+ class DtypeKind(enum.IntEnum):
39
+ """
40
+ Integer enum for data types.
41
+
42
+ Attributes
43
+ ----------
44
+ INT : int
45
+ Matches to signed integer data type.
46
+ UINT : int
47
+ Matches to unsigned integer data type.
48
+ FLOAT : int
49
+ Matches to floating point data type.
50
+ BOOL : int
51
+ Matches to boolean data type.
52
+ STRING : int
53
+ Matches to string data type (UTF-8 encoded).
54
+ DATETIME : int
55
+ Matches to datetime data type.
56
+ CATEGORICAL : int
57
+ Matches to categorical data type.
58
+ """
59
+
60
+ INT = 0
61
+ UINT = 1
62
+ FLOAT = 2
63
+ BOOL = 20
64
+ STRING = 21 # UTF-8
65
+ DATETIME = 22
66
+ CATEGORICAL = 23
67
+
68
+
69
+ class ColumnNullType(enum.IntEnum):
70
+ """
71
+ Integer enum for null type representation.
72
+
73
+ Attributes
74
+ ----------
75
+ NON_NULLABLE : int
76
+ Non-nullable column.
77
+ USE_NAN : int
78
+ Use explicit float NaN value.
79
+ USE_SENTINEL : int
80
+ Sentinel value besides NaN/NaT.
81
+ USE_BITMASK : int
82
+ The bit is set/unset representing a null on a certain position.
83
+ USE_BYTEMASK : int
84
+ The byte is set/unset representing a null on a certain position.
85
+ """
86
+
87
+ NON_NULLABLE = 0
88
+ USE_NAN = 1
89
+ USE_SENTINEL = 2
90
+ USE_BITMASK = 3
91
+ USE_BYTEMASK = 4
92
+
93
+
94
+ class ColumnBuffers(TypedDict):
95
+ # first element is a buffer containing the column data;
96
+ # second element is the data buffer's associated dtype
97
+ data: tuple[Buffer, Any]
98
+
99
+ # first element is a buffer containing mask values indicating missing data;
100
+ # second element is the mask value buffer's associated dtype.
101
+ # None if the null representation is not a bit or byte mask
102
+ validity: tuple[Buffer, Any] | None
103
+
104
+ # first element is a buffer containing the offset values for
105
+ # variable-size binary data (e.g., variable-length strings);
106
+ # second element is the offsets buffer's associated dtype.
107
+ # None if the data buffer does not have an associated offsets buffer
108
+ offsets: tuple[Buffer, Any] | None
109
+
110
+
111
+ class CategoricalDescription(TypedDict):
112
+ # whether the ordering of dictionary indices is semantically meaningful
113
+ is_ordered: bool
114
+ # whether a dictionary-style mapping of categorical values to other objects exists
115
+ is_dictionary: bool
116
+ # Python-level only (e.g. ``{int: str}``).
117
+ # None if not a dictionary-style categorical.
118
+ categories: Column | None
119
+
120
+
121
+ class Buffer(ABC):
122
+ """
123
+ Data in the buffer is guaranteed to be contiguous in memory.
124
+
125
+ Note that there is no dtype attribute present, a buffer can be thought of
126
+ as simply a block of memory. However, if the column that the buffer is
127
+ attached to has a dtype that's supported by DLPack and ``__dlpack__`` is
128
+ implemented, then that dtype information will be contained in the return
129
+ value from ``__dlpack__``.
130
+
131
+ This distinction is useful to support both data exchange via DLPack on a
132
+ buffer and dtypes like variable-length strings which do not have a
133
+ fixed number of bytes per element.
134
+ """
135
+
136
+ @property
137
+ @abstractmethod
138
+ def bufsize(self) -> int:
139
+ """
140
+ Buffer size in bytes.
141
+ """
142
+
143
+ @property
144
+ @abstractmethod
145
+ def ptr(self) -> int:
146
+ """
147
+ Pointer to start of the buffer as an integer.
148
+ """
149
+
150
+ @abstractmethod
151
+ def __dlpack__(self):
152
+ """
153
+ Produce DLPack capsule (see array API standard).
154
+
155
+ Raises:
156
+
157
+ - TypeError : if the buffer contains unsupported dtypes.
158
+ - NotImplementedError : if DLPack support is not implemented
159
+
160
+ Useful to have to connect to array libraries. Support optional because
161
+ it's not completely trivial to implement for a Python-only library.
162
+ """
163
+ raise NotImplementedError("__dlpack__")
164
+
165
+ @abstractmethod
166
+ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
167
+ """
168
+ Device type and device ID for where the data in the buffer resides.
169
+ Uses device type codes matching DLPack.
170
+ Note: must be implemented even if ``__dlpack__`` is not.
171
+ """
172
+
173
+
174
+ class Column(ABC):
175
+ """
176
+ A column object, with only the methods and properties required by the
177
+ interchange protocol defined.
178
+
179
+ A column can contain one or more chunks. Each chunk can contain up to three
180
+ buffers - a data buffer, a mask buffer (depending on null representation),
181
+ and an offsets buffer (if variable-size binary; e.g., variable-length
182
+ strings).
183
+
184
+ TBD: Arrow has a separate "null" dtype, and has no separate mask concept.
185
+ Instead, it seems to use "children" for both columns with a bit mask,
186
+ and for nested dtypes. Unclear whether this is elegant or confusing.
187
+ This design requires checking the null representation explicitly.
188
+
189
+ The Arrow design requires checking:
190
+ 1. the ARROW_FLAG_NULLABLE (for sentinel values)
191
+ 2. if a column has two children, combined with one of those children
192
+ having a null dtype.
193
+
194
+ Making the mask concept explicit seems useful. One null dtype would
195
+ not be enough to cover both bit and byte masks, so that would mean
196
+ even more checking if we did it the Arrow way.
197
+
198
+ TBD: there's also the "chunk" concept here, which is implicit in Arrow as
199
+ multiple buffers per array (= column here). Semantically it may make
200
+ sense to have both: chunks were meant for example for lazy evaluation
201
+ of data which doesn't fit in memory, while multiple buffers per column
202
+ could also come from doing a selection operation on a single
203
+ contiguous buffer.
204
+
205
+ Given these concepts, one would expect chunks to be all of the same
206
+ size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows),
207
+ while multiple buffers could have data-dependent lengths. Not an issue
208
+ in pandas if one column is backed by a single NumPy array, but in
209
+ Arrow it seems possible.
210
+ Are multiple chunks *and* multiple buffers per column necessary for
211
+ the purposes of this interchange protocol, or must producers either
212
+ reuse the chunk concept for this or copy the data?
213
+
214
+ Note: this Column object can only be produced by ``__dataframe__``, so
215
+ doesn't need its own version or ``__column__`` protocol.
216
+ """
217
+
218
+ @abstractmethod
219
+ def size(self) -> int:
220
+ """
221
+ Size of the column, in elements.
222
+
223
+ Corresponds to DataFrame.num_rows() if column is a single chunk;
224
+ equal to size of this current chunk otherwise.
225
+ """
226
+
227
+ @property
228
+ @abstractmethod
229
+ def offset(self) -> int:
230
+ """
231
+ Offset of first element.
232
+
233
+ May be > 0 if using chunks; for example for a column with N chunks of
234
+ equal size M (only the last chunk may be shorter),
235
+ ``offset = n * M``, ``n = 0 .. N-1``.
236
+ """
237
+
238
+ @property
239
+ @abstractmethod
240
+ def dtype(self) -> tuple[DtypeKind, int, str, str]:
241
+ """
242
+ Dtype description as a tuple ``(kind, bit-width, format string, endianness)``.
243
+
244
+ Bit-width : the number of bits as an integer
245
+ Format string : data type description format string in Apache Arrow C
246
+ Data Interface format.
247
+ Endianness : currently only native endianness (``=``) is supported
248
+
249
+ Notes:
250
+ - Kind specifiers are aligned with DLPack where possible (hence the
251
+ jump to 20, leaving enough room for future extension)
252
+ - Masks must be specified as boolean with either bit width 1 (for bit
253
+ masks) or 8 (for byte masks).
254
+ - Dtype width in bits was preferred over bytes
255
+ - Endianness isn't too useful, but included now in case in the future
256
+ we need to support non-native endianness
257
+ - Went with Apache Arrow format strings over NumPy format strings
258
+ because they're more complete from a dataframe perspective
259
+ - Format strings are mostly useful for datetime specification, and
260
+ for categoricals.
261
+ - For categoricals, the format string describes the type of the
262
+ categorical in the data buffer. In case of a separate encoding of
263
+ the categorical (e.g. an integer to string mapping), this can
264
+ be derived from ``self.describe_categorical``.
265
+ - Data types not included: complex, Arrow-style null, binary, decimal,
266
+ and nested (list, struct, map, union) dtypes.
267
+ """
268
+
269
+ @property
270
+ @abstractmethod
271
+ def describe_categorical(self) -> CategoricalDescription:
272
+ """
273
+ If the dtype is categorical, there are two options:
274
+ - There are only values in the data buffer.
275
+ - There is a separate non-categorical Column encoding for categorical values.
276
+
277
+ Raises TypeError if the dtype is not categorical
278
+
279
+ Returns the dictionary with description on how to interpret the data buffer:
280
+ - "is_ordered" : bool, whether the ordering of dictionary indices is
281
+ semantically meaningful.
282
+ - "is_dictionary" : bool, whether a mapping of
283
+ categorical values to other objects exists
284
+ - "categories" : Column representing the (implicit) mapping of indices to
285
+ category values (e.g. an array of cat1, cat2, ...).
286
+ None if not a dictionary-style categorical.
287
+
288
+ TBD: are there any other in-memory representations that are needed?
289
+ """
290
+
291
+ @property
292
+ @abstractmethod
293
+ def describe_null(self) -> tuple[ColumnNullType, Any]:
294
+ """
295
+ Return the missing value (or "null") representation the column dtype
296
+ uses, as a tuple ``(kind, value)``.
297
+
298
+ Value : if kind is "sentinel value", the actual value. If kind is a bit
299
+ mask or a byte mask, the value (0 or 1) indicating a missing value. None
300
+ otherwise.
301
+ """
302
+
303
+ @property
304
+ @abstractmethod
305
+ def null_count(self) -> int | None:
306
+ """
307
+ Number of null elements, if known.
308
+
309
+ Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.
310
+ """
311
+
312
+ @property
313
+ @abstractmethod
314
+ def metadata(self) -> dict[str, Any]:
315
+ """
316
+ The metadata for the column. See `DataFrame.metadata` for more details.
317
+ """
318
+
319
+ @abstractmethod
320
+ def num_chunks(self) -> int:
321
+ """
322
+ Return the number of chunks the column consists of.
323
+ """
324
+
325
+ @abstractmethod
326
+ def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:
327
+ """
328
+ Return an iterator yielding the chunks.
329
+
330
+ See `DataFrame.get_chunks` for details on ``n_chunks``.
331
+ """
332
+
333
+ @abstractmethod
334
+ def get_buffers(self) -> ColumnBuffers:
335
+ """
336
+ Return a dictionary containing the underlying buffers.
337
+
338
+ The returned dictionary has the following contents:
339
+
340
+ - "data": a two-element tuple whose first element is a buffer
341
+ containing the data and whose second element is the data
342
+ buffer's associated dtype.
343
+ - "validity": a two-element tuple whose first element is a buffer
344
+ containing mask values indicating missing data and
345
+ whose second element is the mask value buffer's
346
+ associated dtype. None if the null representation is
347
+ not a bit or byte mask.
348
+ - "offsets": a two-element tuple whose first element is a buffer
349
+ containing the offset values for variable-size binary
350
+ data (e.g., variable-length strings) and whose second
351
+ element is the offsets buffer's associated dtype. None
352
+ if the data buffer does not have an associated offsets
353
+ buffer.
354
+ """
355
+
356
+
357
+ # def get_children(self) -> Iterable[Column]:
358
+ # """
359
+ # Children columns underneath the column, each object in this iterator
360
+ # must adhere to the column specification.
361
+ # """
362
+ # pass
363
+
364
+
365
+ class DataFrame(ABC):
366
+ """
367
+ A data frame class, with only the methods required by the interchange
368
+ protocol defined.
369
+
370
+ A "data frame" represents an ordered collection of named columns.
371
+ A column's "name" must be a unique string.
372
+ Columns may be accessed by name or by position.
373
+
374
+ This could be a public data frame class, or an object with the methods and
375
+ attributes defined on this DataFrame class could be returned from the
376
+ ``__dataframe__`` method of a public data frame class in a library adhering
377
+ to the dataframe interchange protocol specification.
378
+ """
379
+
380
+ version = 0 # version of the protocol
381
+
382
+ @abstractmethod
383
+ def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
384
+ """Construct a new interchange object, potentially changing the parameters."""
385
+
386
+ @property
387
+ @abstractmethod
388
+ def metadata(self) -> dict[str, Any]:
389
+ """
390
+ The metadata for the data frame, as a dictionary with string keys. The
391
+ contents of `metadata` may be anything, they are meant for a library
392
+ to store information that it needs to, e.g., roundtrip losslessly or
393
+ for two implementations to share data that is not (yet) part of the
394
+ interchange protocol specification. For avoiding collisions with other
395
+ entries, please name the keys with the name of the library
396
+ followed by a period and the desired name, e.g., ``pandas.indexcol``.
397
+ """
398
+
399
+ @abstractmethod
400
+ def num_columns(self) -> int:
401
+ """
402
+ Return the number of columns in the DataFrame.
403
+ """
404
+
405
+ @abstractmethod
406
+ def num_rows(self) -> int | None:
407
+ # TODO: not happy with Optional, but need to flag it may be expensive
408
+ # why include it if it may be None - what do we expect consumers
409
+ # to do here?
410
+ """
411
+ Return the number of rows in the DataFrame, if available.
412
+ """
413
+
414
+ @abstractmethod
415
+ def num_chunks(self) -> int:
416
+ """
417
+ Return the number of chunks the DataFrame consists of.
418
+ """
419
+
420
+ @abstractmethod
421
+ def column_names(self) -> Iterable[str]:
422
+ """
423
+ Return an iterator yielding the column names.
424
+ """
425
+
426
+ @abstractmethod
427
+ def get_column(self, i: int) -> Column:
428
+ """
429
+ Return the column at the indicated position.
430
+ """
431
+
432
+ @abstractmethod
433
+ def get_column_by_name(self, name: str) -> Column:
434
+ """
435
+ Return the column whose name is the indicated name.
436
+ """
437
+
438
+ @abstractmethod
439
+ def get_columns(self) -> Iterable[Column]:
440
+ """
441
+ Return an iterator yielding the columns.
442
+ """
443
+
444
+ @abstractmethod
445
+ def select_columns(self, indices: Sequence[int]) -> DataFrame:
446
+ """
447
+ Create a new DataFrame by selecting a subset of columns by index.
448
+ """
449
+
450
+ @abstractmethod
451
+ def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:
452
+ """
453
+ Create a new DataFrame by selecting a subset of columns by name.
454
+ """
455
+
456
+ @abstractmethod
457
+ def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
458
+ """
459
+ Return an iterator yielding the chunks.
460
+
461
+ By default (None), yields the chunks that the data is stored as by the
462
+ producer. If given, ``n_chunks`` must be a multiple of
463
+ ``self.num_chunks()``, meaning the producer must subdivide each chunk
464
+ before yielding it.
465
+ """
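As a rough sketch of how a consumer might walk this interface, the snippet below iterates over the chunks and columns of an arbitrary interchange object and inspects their metadata; ``some_df`` is a placeholder for any object exposing ``__dataframe__``, and nothing here is specific to pandas:

    # minimal consumer sketch (assumes `some_df` implements __dataframe__)
    xchg = some_df.__dataframe__(allow_copy=True)
    print(xchg.num_columns(), xchg.num_rows(), xchg.num_chunks())
    for chunk in xchg.get_chunks():
        for name in chunk.column_names():
            col = chunk.get_column_by_name(name)
            bufs = col.get_buffers()  # keys: "data", "validity", "offsets"
            print(name, col.dtype, col.describe_null, bufs["data"][1])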
venv/lib/python3.10/site-packages/pandas/core/interchange/from_dataframe.py ADDED
@@ -0,0 +1,526 @@
1
+ from __future__ import annotations
2
+
3
+ import ctypes
4
+ import re
5
+ from typing import Any
6
+
7
+ import numpy as np
8
+
9
+ from pandas.compat._optional import import_optional_dependency
10
+ from pandas.errors import SettingWithCopyError
11
+
12
+ import pandas as pd
13
+ from pandas.core.interchange.dataframe_protocol import (
14
+ Buffer,
15
+ Column,
16
+ ColumnNullType,
17
+ DataFrame as DataFrameXchg,
18
+ DtypeKind,
19
+ )
20
+ from pandas.core.interchange.utils import (
21
+ ArrowCTypes,
22
+ Endianness,
23
+ )
24
+
25
+ _NP_DTYPES: dict[DtypeKind, dict[int, Any]] = {
26
+ DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64},
27
+ DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64},
28
+ DtypeKind.FLOAT: {32: np.float32, 64: np.float64},
29
+ DtypeKind.BOOL: {1: bool, 8: bool},
30
+ }
31
+
32
+
33
+ def from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame:
34
+ """
35
+ Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.
36
+
37
+ Parameters
38
+ ----------
39
+ df : DataFrameXchg
40
+ Object supporting the interchange protocol, i.e. `__dataframe__` method.
41
+ allow_copy : bool, default: True
42
+ Whether to allow copying the memory to perform the conversion
43
+ (if false then zero-copy approach is requested).
44
+
45
+ Returns
46
+ -------
47
+ pd.DataFrame
48
+
49
+ Examples
50
+ --------
51
+ >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
52
+ >>> interchange_object = df_not_necessarily_pandas.__dataframe__()
53
+ >>> interchange_object.column_names()
54
+ Index(['A', 'B'], dtype='object')
55
+ >>> df_pandas = (pd.api.interchange.from_dataframe
56
+ ... (interchange_object.select_columns_by_name(['A'])))
57
+ >>> df_pandas
58
+ A
59
+ 0 1
60
+ 1 2
61
+
62
+ These methods (``column_names``, ``select_columns_by_name``) should work
63
+ for any dataframe library which implements the interchange protocol.
64
+ """
65
+ if isinstance(df, pd.DataFrame):
66
+ return df
67
+
68
+ if not hasattr(df, "__dataframe__"):
69
+ raise ValueError("`df` does not support __dataframe__")
70
+
71
+ return _from_dataframe(
72
+ df.__dataframe__(allow_copy=allow_copy), allow_copy=allow_copy
73
+ )
74
+
75
+
76
+ def _from_dataframe(df: DataFrameXchg, allow_copy: bool = True):
77
+ """
78
+ Build a ``pd.DataFrame`` from the DataFrame interchange object.
79
+
80
+ Parameters
81
+ ----------
82
+ df : DataFrameXchg
83
+ Object supporting the interchange protocol, i.e. `__dataframe__` method.
84
+ allow_copy : bool, default: True
85
+ Whether to allow copying the memory to perform the conversion
86
+ (if false then zero-copy approach is requested).
87
+
88
+ Returns
89
+ -------
90
+ pd.DataFrame
91
+ """
92
+ pandas_dfs = []
93
+ for chunk in df.get_chunks():
94
+ pandas_df = protocol_df_chunk_to_pandas(chunk)
95
+ pandas_dfs.append(pandas_df)
96
+
97
+ if not allow_copy and len(pandas_dfs) > 1:
98
+ raise RuntimeError(
99
+ "To join chunks a copy is required which is forbidden by allow_copy=False"
100
+ )
101
+ if not pandas_dfs:
102
+ pandas_df = protocol_df_chunk_to_pandas(df)
103
+ elif len(pandas_dfs) == 1:
104
+ pandas_df = pandas_dfs[0]
105
+ else:
106
+ pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False)
107
+
108
+ index_obj = df.metadata.get("pandas.index", None)
109
+ if index_obj is not None:
110
+ pandas_df.index = index_obj
111
+
112
+ return pandas_df
113
+
114
+
115
+ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
116
+ """
117
+ Convert interchange protocol chunk to ``pd.DataFrame``.
118
+
119
+ Parameters
120
+ ----------
121
+ df : DataFrameXchg
122
+
123
+ Returns
124
+ -------
125
+ pd.DataFrame
126
+ """
127
+ # We need a dict of columns here, with each column being a NumPy array (at
128
+ # least for now, deal with non-NumPy dtypes later).
129
+ columns: dict[str, Any] = {}
130
+ buffers = [] # hold on to buffers, keeps memory alive
131
+ for name in df.column_names():
132
+ if not isinstance(name, str):
133
+ raise ValueError(f"Column {name} is not a string")
134
+ if name in columns:
135
+ raise ValueError(f"Column {name} is not unique")
136
+ col = df.get_column_by_name(name)
137
+ dtype = col.dtype[0]
138
+ if dtype in (
139
+ DtypeKind.INT,
140
+ DtypeKind.UINT,
141
+ DtypeKind.FLOAT,
142
+ DtypeKind.BOOL,
143
+ ):
144
+ columns[name], buf = primitive_column_to_ndarray(col)
145
+ elif dtype == DtypeKind.CATEGORICAL:
146
+ columns[name], buf = categorical_column_to_series(col)
147
+ elif dtype == DtypeKind.STRING:
148
+ columns[name], buf = string_column_to_ndarray(col)
149
+ elif dtype == DtypeKind.DATETIME:
150
+ columns[name], buf = datetime_column_to_ndarray(col)
151
+ else:
152
+ raise NotImplementedError(f"Data type {dtype} not handled yet")
153
+
154
+ buffers.append(buf)
155
+
156
+ pandas_df = pd.DataFrame(columns)
157
+ pandas_df.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"] = buffers
158
+ return pandas_df
159
+
160
+
161
+ def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
162
+ """
163
+ Convert a column holding one of the primitive dtypes to a NumPy array.
164
+
165
+ A primitive type is one of: int, uint, float, bool.
166
+
167
+ Parameters
168
+ ----------
169
+ col : Column
170
+
171
+ Returns
172
+ -------
173
+ tuple
174
+ Tuple of np.ndarray holding the data and the memory owner object
175
+ that keeps the memory alive.
176
+ """
177
+ buffers = col.get_buffers()
178
+
179
+ data_buff, data_dtype = buffers["data"]
180
+ data = buffer_to_ndarray(
181
+ data_buff, data_dtype, offset=col.offset, length=col.size()
182
+ )
183
+
184
+ data = set_nulls(data, col, buffers["validity"])
185
+ return data, buffers
186
+
187
+
188
+ def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]:
189
+ """
190
+ Convert a column holding categorical data to a pandas Series.
191
+
192
+ Parameters
193
+ ----------
194
+ col : Column
195
+
196
+ Returns
197
+ -------
198
+ tuple
199
+ Tuple of pd.Series holding the data and the memory owner object
200
+ that keeps the memory alive.
201
+ """
202
+ categorical = col.describe_categorical
203
+
204
+ if not categorical["is_dictionary"]:
205
+ raise NotImplementedError("Non-dictionary categoricals not supported yet")
206
+
207
+ cat_column = categorical["categories"]
208
+ if hasattr(cat_column, "_col"):
209
+ # Item "Column" of "Optional[Column]" has no attribute "_col"
210
+ # Item "None" of "Optional[Column]" has no attribute "_col"
211
+ categories = np.array(cat_column._col) # type: ignore[union-attr]
212
+ else:
213
+ raise NotImplementedError(
214
+ "Interchanging categorical columns isn't supported yet, and our "
215
+ "fallback of using the `col._col` attribute (a ndarray) failed."
216
+ )
217
+ buffers = col.get_buffers()
218
+
219
+ codes_buff, codes_dtype = buffers["data"]
220
+ codes = buffer_to_ndarray(
221
+ codes_buff, codes_dtype, offset=col.offset, length=col.size()
222
+ )
223
+
224
+ # Doing module in order to not get ``IndexError`` for
225
+ # out-of-bounds sentinel values in `codes`
226
+ if len(categories) > 0:
227
+ values = categories[codes % len(categories)]
228
+ else:
229
+ values = codes
230
+
231
+ cat = pd.Categorical(
232
+ values, categories=categories, ordered=categorical["is_ordered"]
233
+ )
234
+ data = pd.Series(cat)
235
+
236
+ data = set_nulls(data, col, buffers["validity"])
237
+ return data, buffers
238
+
239
+
240
+ def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
241
+ """
242
+ Convert a column holding string data to a NumPy array.
243
+
244
+ Parameters
245
+ ----------
246
+ col : Column
247
+
248
+ Returns
249
+ -------
250
+ tuple
251
+ Tuple of np.ndarray holding the data and the memory owner object
252
+ that keeps the memory alive.
253
+ """
254
+ null_kind, sentinel_val = col.describe_null
255
+
256
+ if null_kind not in (
257
+ ColumnNullType.NON_NULLABLE,
258
+ ColumnNullType.USE_BITMASK,
259
+ ColumnNullType.USE_BYTEMASK,
260
+ ):
261
+ raise NotImplementedError(
262
+ f"{null_kind} null kind is not yet supported for string columns."
263
+ )
264
+
265
+ buffers = col.get_buffers()
266
+
267
+ assert buffers["offsets"], "String buffers must contain offsets"
268
+ # Retrieve the data buffer containing the UTF-8 code units
269
+ data_buff, _ = buffers["data"]
270
+ # We're going to reinterpret the buffer as uint8, so make sure we can do it safely
271
+ assert col.dtype[2] in (
272
+ ArrowCTypes.STRING,
273
+ ArrowCTypes.LARGE_STRING,
274
+ ) # format_str == utf-8
275
+ # Convert the buffers to NumPy arrays. In order to go from STRING to
276
+ # an equivalent ndarray, we claim that the buffer is uint8 (i.e., a byte array)
277
+ data_dtype = (
278
+ DtypeKind.UINT,
279
+ 8,
280
+ ArrowCTypes.UINT8,
281
+ Endianness.NATIVE,
282
+ )
283
+ # Specify zero offset as we don't want to chunk the string data
284
+ data = buffer_to_ndarray(data_buff, data_dtype, offset=0, length=data_buff.bufsize)
285
+
286
+ # Retrieve the offsets buffer containing the index offsets demarcating
287
+ # the beginning and the ending of each string
288
+ offset_buff, offset_dtype = buffers["offsets"]
289
+ # Offsets buffer contains start-stop positions of strings in the data buffer,
290
+ # meaning that it has more elements than in the data buffer, do `col.size() + 1`
291
+ # here to pass a proper offsets buffer size
292
+ offsets = buffer_to_ndarray(
293
+ offset_buff, offset_dtype, offset=col.offset, length=col.size() + 1
294
+ )
295
+
296
+ null_pos = None
297
+ if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK):
298
+ validity = buffers["validity"]
299
+ if validity is not None:
300
+ valid_buff, valid_dtype = validity
301
+ null_pos = buffer_to_ndarray(
302
+ valid_buff, valid_dtype, offset=col.offset, length=col.size()
303
+ )
304
+ if sentinel_val == 0:
305
+ null_pos = ~null_pos
306
+
307
+ # Assemble the strings from the code units
308
+ str_list: list[None | float | str] = [None] * col.size()
309
+ for i in range(col.size()):
310
+ # Check for missing values
311
+ if null_pos is not None and null_pos[i]:
312
+ str_list[i] = np.nan
313
+ continue
314
+
315
+ # Extract a range of code units
316
+ units = data[offsets[i] : offsets[i + 1]]
317
+
318
+ # Convert the list of code units to bytes
319
+ str_bytes = bytes(units)
320
+
321
+ # Create the string
322
+ string = str_bytes.decode(encoding="utf-8")
323
+
324
+ # Add to our list of strings
325
+ str_list[i] = string
326
+
327
+ # Convert the string list to a NumPy array
328
+ return np.asarray(str_list, dtype="object"), buffers
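For intuition, the offsets-based assembly above can be reproduced on a tiny hand-built pair of arrays; these are illustrative stand-ins for the protocol's data and offsets buffers, not real interchange objects:

    import numpy as np

    data = np.frombuffer(b"hipandas", dtype=np.uint8)  # UTF-8 code units
    offsets = np.array([0, 2, 8], dtype=np.int64)      # string i spans data[offsets[i]:offsets[i+1]]
    strings = [
        bytes(data[offsets[i] : offsets[i + 1]]).decode("utf-8")
        for i in range(len(offsets) - 1)
    ]
    # strings == ["hi", "pandas"]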
329
+
330
+
331
+ def parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray:
332
+ """Parse datetime `format_str` to interpret the `data`."""
333
+ # timestamp 'ts{unit}:tz'
334
+ timestamp_meta = re.match(r"ts([smun]):(.*)", format_str)
335
+ if timestamp_meta:
336
+ unit, tz = timestamp_meta.group(1), timestamp_meta.group(2)
337
+ if unit != "s":
338
+ # the format string describes only a first letter of the unit, so
339
+ # add one extra letter to convert the unit to numpy-style:
340
+ # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns'
341
+ unit += "s"
342
+ data = data.astype(f"datetime64[{unit}]")
343
+ if tz != "":
344
+ data = pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(tz)
345
+ return data
346
+
347
+ # date 'td{Days/Ms}'
348
+ date_meta = re.match(r"td([Dm])", format_str)
349
+ if date_meta:
350
+ unit = date_meta.group(1)
351
+ if unit == "D":
352
+ # NumPy doesn't support DAY unit, so converting days to seconds
353
+ # (converting to uint64 to avoid overflow)
354
+ data = (data.astype(np.uint64) * (24 * 60 * 60)).astype("datetime64[s]")
355
+ elif unit == "m":
356
+ data = data.astype("datetime64[ms]")
357
+ else:
358
+ raise NotImplementedError(f"Date unit is not supported: {unit}")
359
+ return data
360
+
361
+ raise NotImplementedError(f"DateTime kind is not supported: {format_str}")
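As a quick check of the format-string handling above, the unit letter and timezone of a timestamp format can be pulled apart exactly as the regex does; the format string follows the Arrow C convention used throughout this module:

    import re

    m = re.match(r"ts([smun]):(.*)", "tsm:Europe/London")
    unit, tz = m.group(1), m.group(2)  # ('m', 'Europe/London')
    # 'm' is then expanded to the NumPy-style 'ms' before .astype("datetime64[ms]"),
    # and a non-empty tz triggers tz_localize("UTC").tz_convert(tz)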
362
+
363
+
364
+ def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray | pd.Series, Any]:
365
+ """
366
+ Convert a column holding DateTime data to a NumPy array.
367
+
368
+ Parameters
369
+ ----------
370
+ col : Column
371
+
372
+ Returns
373
+ -------
374
+ tuple
375
+ Tuple of np.ndarray holding the data and the memory owner object
376
+ that keeps the memory alive.
377
+ """
378
+ buffers = col.get_buffers()
379
+
380
+ _, col_bit_width, format_str, _ = col.dtype
381
+ dbuf, _ = buffers["data"]
382
+ # Treat the buffer as integers counting units elapsed since the epoch (1970-01-01)
383
+
384
+ data = buffer_to_ndarray(
385
+ dbuf,
386
+ (
387
+ DtypeKind.INT,
388
+ col_bit_width,
389
+ getattr(ArrowCTypes, f"INT{col_bit_width}"),
390
+ Endianness.NATIVE,
391
+ ),
392
+ offset=col.offset,
393
+ length=col.size(),
394
+ )
395
+
396
+ data = parse_datetime_format_str(format_str, data) # type: ignore[assignment]
397
+ data = set_nulls(data, col, buffers["validity"])
398
+ return data, buffers
399
+
400
+
401
+ def buffer_to_ndarray(
402
+ buffer: Buffer,
403
+ dtype: tuple[DtypeKind, int, str, str],
404
+ *,
405
+ length: int,
406
+ offset: int = 0,
407
+ ) -> np.ndarray:
408
+ """
409
+ Build a NumPy array from the passed buffer.
410
+
411
+ Parameters
412
+ ----------
413
+ buffer : Buffer
414
+ Buffer to build a NumPy array from.
415
+ dtype : tuple
416
+ Data type of the buffer conforming protocol dtypes format.
417
+ offset : int, default: 0
418
+ Number of elements to offset from the start of the buffer.
419
+ length : int, optional
420
+ If the buffer is a bit-mask, specifies a number of bits to read
421
+ from the buffer. Has no effect otherwise.
422
+
423
+ Returns
424
+ -------
425
+ np.ndarray
426
+
427
+ Notes
428
+ -----
429
+ The returned array doesn't own the memory. The caller of this function is
430
+ responsible for keeping the memory owner object alive as long as
431
+ the returned NumPy array is being used.
432
+ """
433
+ kind, bit_width, _, _ = dtype
434
+
435
+ column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None)
436
+ if column_dtype is None:
437
+ raise NotImplementedError(f"Conversion for {dtype} is not yet supported.")
438
+
439
+ # TODO: No DLPack yet, so need to construct a new ndarray from the data pointer
440
+ # and size in the buffer plus the dtype on the column. Use DLPack as NumPy supports
441
+ # it since https://github.com/numpy/numpy/pull/19083
442
+ ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype)
443
+
444
+ if bit_width == 1:
445
+ assert length is not None, "`length` must be specified for a bit-mask buffer."
446
+ pa = import_optional_dependency("pyarrow")
447
+ arr = pa.BooleanArray.from_buffers(
448
+ pa.bool_(),
449
+ length,
450
+ [None, pa.foreign_buffer(buffer.ptr, length)],
451
+ offset=offset,
452
+ )
453
+ return np.asarray(arr)
454
+ else:
455
+ data_pointer = ctypes.cast(
456
+ buffer.ptr + (offset * bit_width // 8), ctypes.POINTER(ctypes_type)
457
+ )
458
+ if length > 0:
459
+ return np.ctypeslib.as_array(data_pointer, shape=(length,))
460
+ return np.array([], dtype=ctypes_type)
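The non-bitmask branch above can be exercised against a plain NumPy array standing in for a protocol buffer; this is only a sketch of the zero-copy mechanics, using the array's own data pointer rather than a real ``Buffer`` object:

    import ctypes

    import numpy as np

    src = np.arange(5, dtype=np.int64)
    ptr = ctypes.cast(src.ctypes.data, ctypes.POINTER(ctypes.c_int64))
    view = np.ctypeslib.as_array(ptr, shape=(len(src),))
    view[0] = 99  # writes through to `src`: no copy was made
    # src is now array([99, 1, 2, 3, 4]); `src` must outlive `view`, mirroring
    # the "caller keeps the memory owner alive" note in the docstring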
461
+
462
+
463
+ def set_nulls(
464
+ data: np.ndarray | pd.Series,
465
+ col: Column,
466
+ validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None,
467
+ allow_modify_inplace: bool = True,
468
+ ):
469
+ """
470
+ Set null values for the data according to the column null kind.
471
+
472
+ Parameters
473
+ ----------
474
+ data : np.ndarray or pd.Series
475
+ Data to set nulls in.
476
+ col : Column
477
+ Column object that describes the `data`.
478
+ validity : tuple(Buffer, dtype) or None
479
+ The "validity" entry of ``col.get_buffers()``. We do not call
480
+ ``col.get_buffers()`` here so as not to take ownership of the buffer memory.
481
+ allow_modify_inplace : bool, default: True
482
+ Whether to modify the `data` inplace when zero-copy is possible (True) or always
483
+ modify a copy of the `data` (False).
484
+
485
+ Returns
486
+ -------
487
+ np.ndarray or pd.Series
488
+ Data with the nulls being set.
489
+ """
490
+ if validity is None:
491
+ return data
492
+ null_kind, sentinel_val = col.describe_null
493
+ null_pos = None
494
+
495
+ if null_kind == ColumnNullType.USE_SENTINEL:
496
+ null_pos = pd.Series(data) == sentinel_val
497
+ elif null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK):
498
+ assert validity, "Expected to have a validity buffer for the mask"
499
+ valid_buff, valid_dtype = validity
500
+ null_pos = buffer_to_ndarray(
501
+ valid_buff, valid_dtype, offset=col.offset, length=col.size()
502
+ )
503
+ if sentinel_val == 0:
504
+ null_pos = ~null_pos
505
+ elif null_kind in (ColumnNullType.NON_NULLABLE, ColumnNullType.USE_NAN):
506
+ pass
507
+ else:
508
+ raise NotImplementedError(f"Null kind {null_kind} is not yet supported.")
509
+
510
+ if null_pos is not None and np.any(null_pos):
511
+ if not allow_modify_inplace:
512
+ data = data.copy()
513
+ try:
514
+ data[null_pos] = None
515
+ except TypeError:
516
+ # TypeError happens if the `data` dtype appears to be non-nullable
517
+ # in numpy notation (bool, int, uint). If this happens,
518
+ # cast the `data` to nullable float dtype.
519
+ data = data.astype(float)
520
+ data[null_pos] = None
521
+ except SettingWithCopyError:
522
+ # `SettingWithCopyError` may happen for datetime-like with missing values.
523
+ data = data.copy()
524
+ data[null_pos] = None
525
+
526
+ return data
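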
venv/lib/python3.10/site-packages/pandas/core/interchange/utils.py ADDED
@@ -0,0 +1,178 @@
1
+ """
2
+ Utility functions and objects for implementing the interchange API.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import typing
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs import lib
12
+
13
+ from pandas.core.dtypes.dtypes import (
14
+ ArrowDtype,
15
+ CategoricalDtype,
16
+ DatetimeTZDtype,
17
+ )
18
+
19
+ import pandas as pd
20
+
21
+ if typing.TYPE_CHECKING:
22
+ from pandas._typing import DtypeObj
23
+
24
+
25
+ # Maps str(pyarrow.DataType) -> C type format string
26
+ # Currently, no pyarrow API for this
27
+ PYARROW_CTYPES = {
28
+ "null": "n",
29
+ "bool": "b",
30
+ "uint8": "C",
31
+ "uint16": "S",
32
+ "uint32": "I",
33
+ "uint64": "L",
34
+ "int8": "c",
35
+ "int16": "s",
36
+ "int32": "i",
37
+ "int64": "l",
38
+ "halffloat": "e", # float16
39
+ "float": "f", # float32
40
+ "double": "g", # float64
41
+ "string": "u",
42
+ "large_string": "U",
43
+ "binary": "z",
44
+ "time32[s]": "tts",
45
+ "time32[ms]": "ttm",
46
+ "time64[us]": "ttu",
47
+ "time64[ns]": "ttn",
48
+ "date32[day]": "tdD",
49
+ "date64[ms]": "tdm",
50
+ "timestamp[s]": "tss:",
51
+ "timestamp[ms]": "tsm:",
52
+ "timestamp[us]": "tsu:",
53
+ "timestamp[ns]": "tsn:",
54
+ "duration[s]": "tDs",
55
+ "duration[ms]": "tDm",
56
+ "duration[us]": "tDu",
57
+ "duration[ns]": "tDn",
58
+ }
59
+
60
+
61
+ class ArrowCTypes:
62
+ """
63
+ Enum for Apache Arrow C type format strings.
64
+
65
+ The Arrow C data interface:
66
+ https://arrow.apache.org/docs/format/CDataInterface.html#data-type-description-format-strings
67
+ """
68
+
69
+ NULL = "n"
70
+ BOOL = "b"
71
+ INT8 = "c"
72
+ UINT8 = "C"
73
+ INT16 = "s"
74
+ UINT16 = "S"
75
+ INT32 = "i"
76
+ UINT32 = "I"
77
+ INT64 = "l"
78
+ UINT64 = "L"
79
+ FLOAT16 = "e"
80
+ FLOAT32 = "f"
81
+ FLOAT64 = "g"
82
+ STRING = "u" # utf-8
83
+ LARGE_STRING = "U" # utf-8
84
+ DATE32 = "tdD"
85
+ DATE64 = "tdm"
86
+ # Resolution:
87
+ # - seconds -> 's'
88
+ # - milliseconds -> 'm'
89
+ # - microseconds -> 'u'
90
+ # - nanoseconds -> 'n'
91
+ TIMESTAMP = "ts{resolution}:{tz}"
92
+ TIME = "tt{resolution}"
93
+
94
+
95
+ class Endianness:
96
+ """Enum indicating the byte-order of a data-type."""
97
+
98
+ LITTLE = "<"
99
+ BIG = ">"
100
+ NATIVE = "="
101
+ NA = "|"
102
+
103
+
104
+ def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:
105
+ """
106
+ Represent pandas `dtype` as a format string in Apache Arrow C notation.
107
+
108
+ Parameters
109
+ ----------
110
+ dtype : np.dtype
111
+ Datatype of pandas DataFrame to represent.
112
+
113
+ Returns
114
+ -------
115
+ str
116
+ Format string in Apache Arrow C notation of the given `dtype`.
117
+ """
118
+ if isinstance(dtype, CategoricalDtype):
119
+ return ArrowCTypes.INT64
120
+ elif dtype == np.dtype("O"):
121
+ return ArrowCTypes.STRING
122
+ elif isinstance(dtype, ArrowDtype):
123
+ import pyarrow as pa
124
+
125
+ pa_type = dtype.pyarrow_dtype
126
+ if pa.types.is_decimal(pa_type):
127
+ return f"d:{pa_type.precision},{pa_type.scale}"
128
+ elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None:
129
+ return f"ts{pa_type.unit[0]}:{pa_type.tz}"
130
+ format_str = PYARROW_CTYPES.get(str(pa_type), None)
131
+ if format_str is not None:
132
+ return format_str
133
+
134
+ format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
135
+ if format_str is not None:
136
+ return format_str
137
+
138
+ if lib.is_np_dtype(dtype, "M"):
139
+ # Selecting the first char of resolution string:
140
+ # dtype.str -> '<M8[ns]' -> 'n'
141
+ resolution = np.datetime_data(dtype)[0][0]
142
+ return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="")
143
+
144
+ elif isinstance(dtype, DatetimeTZDtype):
145
+ return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz)
146
+
147
+ elif isinstance(dtype, pd.BooleanDtype):
148
+ return ArrowCTypes.BOOL
149
+
150
+ raise NotImplementedError(
151
+ f"Conversion of {dtype} to Arrow C format string is not implemented."
152
+ )
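A few concrete return values may help; the calls below assume this module is importable from the private path ``pandas.core.interchange.utils``, so treat them purely as an illustration of the mapping:

    import numpy as np
    import pandas as pd
    from pandas.core.interchange.utils import dtype_to_arrow_c_fmt

    dtype_to_arrow_c_fmt(np.dtype("int64"))           # 'l'
    dtype_to_arrow_c_fmt(np.dtype("float32"))         # 'f'
    dtype_to_arrow_c_fmt(np.dtype("datetime64[ns]"))  # 'tsn:'
    dtype_to_arrow_c_fmt(pd.CategoricalDtype())       # 'l' (categoricals are described via int64 codes)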
153
+
154
+
155
+ def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None:
156
+ """
157
+ Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary.
158
+
159
+ - Returns `None` if the input series is not backed by a multi-chunk pyarrow array
160
+ (and so doesn't need rechunking)
161
+ - Returns a single-chunk-backed-Series if the input is backed by a multi-chunk
162
+ pyarrow array and `allow_copy` is `True`.
163
+ - Raises a `RuntimeError` if `allow_copy` is `False` and the input is
164
+ backed by a multi-chunk pyarrow array.
165
+ """
166
+ if not isinstance(series.dtype, pd.ArrowDtype):
167
+ return None
168
+ chunked_array = series.array._pa_array # type: ignore[attr-defined]
169
+ if len(chunked_array.chunks) == 1:
170
+ return None
171
+ if not allow_copy:
172
+ raise RuntimeError(
173
+ "Found multi-chunk pyarrow array, but `allow_copy` is False. "
174
+ "Please rechunk the array before calling this function, or set "
175
+ "`allow_copy=True`."
176
+ )
177
+ arr = chunked_array.combine_chunks()
178
+ return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index)
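``maybe_rechunk`` only does something when a Series is backed by a multi-chunk pyarrow array, so the sketch below constructs one explicitly; it assumes pyarrow is installed and uses the public ``ArrowExtensionArray`` wrapper purely for illustration:

    import pyarrow as pa

    import pandas as pd
    from pandas.core.interchange.utils import maybe_rechunk

    chunked = pa.chunked_array([[1, 2], [3, 4]])          # two chunks
    s = pd.Series(pd.arrays.ArrowExtensionArray(chunked))
    single = maybe_rechunk(s, allow_copy=True)            # new single-chunk-backed Series
    # maybe_rechunk(s, allow_copy=False) would raise RuntimeError instead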
venv/lib/python3.10/site-packages/pandas/core/missing.py ADDED
@@ -0,0 +1,1158 @@
1
+ """
2
+ Routines for filling missing data.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from functools import wraps
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ Any,
10
+ Literal,
11
+ cast,
12
+ overload,
13
+ )
14
+
15
+ import numpy as np
16
+
17
+ from pandas._libs import (
18
+ NaT,
19
+ algos,
20
+ lib,
21
+ )
22
+ from pandas._typing import (
23
+ ArrayLike,
24
+ AxisInt,
25
+ F,
26
+ ReindexMethod,
27
+ npt,
28
+ )
29
+ from pandas.compat._optional import import_optional_dependency
30
+
31
+ from pandas.core.dtypes.cast import infer_dtype_from
32
+ from pandas.core.dtypes.common import (
33
+ is_array_like,
34
+ is_bool_dtype,
35
+ is_numeric_dtype,
36
+ is_numeric_v_string_like,
37
+ is_object_dtype,
38
+ needs_i8_conversion,
39
+ )
40
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
41
+ from pandas.core.dtypes.missing import (
42
+ is_valid_na_for_dtype,
43
+ isna,
44
+ na_value_for_dtype,
45
+ )
46
+
47
+ if TYPE_CHECKING:
48
+ from pandas import Index
49
+
50
+
51
+ def check_value_size(value, mask: npt.NDArray[np.bool_], length: int):
52
+ """
53
+ Validate the size of the values passed to ExtensionArray.fillna.
54
+ """
55
+ if is_array_like(value):
56
+ if len(value) != length:
57
+ raise ValueError(
58
+ f"Length of 'value' does not match. Got ({len(value)}) "
59
+ f" expected {length}"
60
+ )
61
+ value = value[mask]
62
+
63
+ return value
64
+
65
+
66
+ def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
67
+ """
68
+ Return a masking array of same size/shape as arr
69
+ with entries equaling any member of values_to_mask set to True
70
+
71
+ Parameters
72
+ ----------
73
+ arr : ArrayLike
74
+ values_to_mask: list, tuple, or scalar
75
+
76
+ Returns
77
+ -------
78
+ np.ndarray[bool]
79
+ """
80
+ # When called from Block.replace/replace_list, values_to_mask is a scalar
81
+ # known to be holdable by arr.
82
+ # When called from Series._single_replace, values_to_mask is tuple or list
83
+ dtype, values_to_mask = infer_dtype_from(values_to_mask)
84
+
85
+ if isinstance(dtype, np.dtype):
86
+ values_to_mask = np.array(values_to_mask, dtype=dtype)
87
+ else:
88
+ cls = dtype.construct_array_type()
89
+ if not lib.is_list_like(values_to_mask):
90
+ values_to_mask = [values_to_mask]
91
+ values_to_mask = cls._from_sequence(values_to_mask, dtype=dtype, copy=False)
92
+
93
+ potential_na = False
94
+ if is_object_dtype(arr.dtype):
95
+ # pre-compute mask to avoid comparison to NA
96
+ potential_na = True
97
+ arr_mask = ~isna(arr)
98
+
99
+ na_mask = isna(values_to_mask)
100
+ nonna = values_to_mask[~na_mask]
101
+
102
+ # GH 21977
103
+ mask = np.zeros(arr.shape, dtype=bool)
104
+ if (
105
+ is_numeric_dtype(arr.dtype)
106
+ and not is_bool_dtype(arr.dtype)
107
+ and is_bool_dtype(nonna.dtype)
108
+ ):
109
+ pass
110
+ elif (
111
+ is_bool_dtype(arr.dtype)
112
+ and is_numeric_dtype(nonna.dtype)
113
+ and not is_bool_dtype(nonna.dtype)
114
+ ):
115
+ pass
116
+ else:
117
+ for x in nonna:
118
+ if is_numeric_v_string_like(arr, x):
119
+ # GH#29553 prevent numpy deprecation warnings
120
+ pass
121
+ else:
122
+ if potential_na:
123
+ new_mask = np.zeros(arr.shape, dtype=np.bool_)
124
+ new_mask[arr_mask] = arr[arr_mask] == x
125
+ else:
126
+ new_mask = arr == x
127
+
128
+ if not isinstance(new_mask, np.ndarray):
129
+ # usually BooleanArray
130
+ new_mask = new_mask.to_numpy(dtype=bool, na_value=False)
131
+ mask |= new_mask
132
+
133
+ if na_mask.any():
134
+ mask |= isna(arr)
135
+
136
+ return mask
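Treating this private helper purely illustratively, a minimal call with an object-dtype array shows both the direct match and the NA handling described above:

    import numpy as np

    from pandas.core.missing import mask_missing

    arr = np.array(["a", "b", None, "c"], dtype=object)
    mask_missing(arr, ["b", np.nan])
    # -> array([False,  True,  True, False])
    # 'b' matches by equality; because np.nan is among values_to_mask, isna(arr)
    # is OR-ed in, so the None element is flagged as well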
137
+
138
+
139
+ @overload
140
+ def clean_fill_method(
141
+ method: Literal["ffill", "pad", "bfill", "backfill"],
142
+ *,
143
+ allow_nearest: Literal[False] = ...,
144
+ ) -> Literal["pad", "backfill"]:
145
+ ...
146
+
147
+
148
+ @overload
149
+ def clean_fill_method(
150
+ method: Literal["ffill", "pad", "bfill", "backfill", "nearest"],
151
+ *,
152
+ allow_nearest: Literal[True],
153
+ ) -> Literal["pad", "backfill", "nearest"]:
154
+ ...
155
+
156
+
157
+ def clean_fill_method(
158
+ method: Literal["ffill", "pad", "bfill", "backfill", "nearest"],
159
+ *,
160
+ allow_nearest: bool = False,
161
+ ) -> Literal["pad", "backfill", "nearest"]:
162
+ if isinstance(method, str):
163
+ # error: Incompatible types in assignment (expression has type "str", variable
164
+ # has type "Literal['ffill', 'pad', 'bfill', 'backfill', 'nearest']")
165
+ method = method.lower() # type: ignore[assignment]
166
+ if method == "ffill":
167
+ method = "pad"
168
+ elif method == "bfill":
169
+ method = "backfill"
170
+
171
+ valid_methods = ["pad", "backfill"]
172
+ expecting = "pad (ffill) or backfill (bfill)"
173
+ if allow_nearest:
174
+ valid_methods.append("nearest")
175
+ expecting = "pad (ffill), backfill (bfill) or nearest"
176
+ if method not in valid_methods:
177
+ raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
178
+ return method
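The normalisation performed here is easy to exercise directly; the calls below (against the private ``pandas.core.missing`` module) simply restate what the function body above does:

    from pandas.core.missing import clean_fill_method

    clean_fill_method("ffill")                        # 'pad'
    clean_fill_method("BFILL")                        # 'backfill'
    clean_fill_method("nearest", allow_nearest=True)  # 'nearest'
    # clean_fill_method("nearest") raises ValueError: nearest is rejected unless allow_nearest=True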
179
+
180
+
181
+ # interpolation methods that dispatch to np.interp
182
+
183
+ NP_METHODS = ["linear", "time", "index", "values"]
184
+
185
+ # interpolation methods that dispatch to _interpolate_scipy_wrapper
186
+
187
+ SP_METHODS = [
188
+ "nearest",
189
+ "zero",
190
+ "slinear",
191
+ "quadratic",
192
+ "cubic",
193
+ "barycentric",
194
+ "krogh",
195
+ "spline",
196
+ "polynomial",
197
+ "from_derivatives",
198
+ "piecewise_polynomial",
199
+ "pchip",
200
+ "akima",
201
+ "cubicspline",
202
+ ]
203
+
204
+
205
+ def clean_interp_method(method: str, index: Index, **kwargs) -> str:
206
+ order = kwargs.get("order")
207
+
208
+ if method in ("spline", "polynomial") and order is None:
209
+ raise ValueError("You must specify the order of the spline or polynomial.")
210
+
211
+ valid = NP_METHODS + SP_METHODS
212
+ if method not in valid:
213
+ raise ValueError(f"method must be one of {valid}. Got '{method}' instead.")
214
+
215
+ if method in ("krogh", "piecewise_polynomial", "pchip"):
216
+ if not index.is_monotonic_increasing:
217
+ raise ValueError(
218
+ f"{method} interpolation requires that the index be monotonic."
219
+ )
220
+
221
+ return method
222
+
223
+
224
+ def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None:
225
+ """
226
+ Retrieves the positional index of the first valid value.
227
+
228
+ Parameters
229
+ ----------
230
+ how : {'first', 'last'}
231
+ Use this parameter to change between the first or last valid index.
232
+ is_valid: np.ndarray
233
+ Mask to find na_values.
234
+
235
+ Returns
236
+ -------
237
+ int or None
238
+ """
239
+ assert how in ["first", "last"]
240
+
241
+ if len(is_valid) == 0: # early stop
242
+ return None
243
+
244
+ if is_valid.ndim == 2:
245
+ is_valid = is_valid.any(axis=1) # reduce axis 1
246
+
247
+ if how == "first":
248
+ idxpos = is_valid[::].argmax()
249
+
250
+ elif how == "last":
251
+ idxpos = len(is_valid) - 1 - is_valid[::-1].argmax()
252
+
253
+ chk_notna = is_valid[idxpos]
254
+
255
+ if not chk_notna:
256
+ return None
257
+ # Incompatible return value type (got "signedinteger[Any]",
258
+ # expected "Optional[int]")
259
+ return idxpos # type: ignore[return-value]
260
+
261
+
262
+ def validate_limit_direction(
263
+ limit_direction: str,
264
+ ) -> Literal["forward", "backward", "both"]:
265
+ valid_limit_directions = ["forward", "backward", "both"]
266
+ limit_direction = limit_direction.lower()
267
+ if limit_direction not in valid_limit_directions:
268
+ raise ValueError(
269
+ "Invalid limit_direction: expecting one of "
270
+ f"{valid_limit_directions}, got '{limit_direction}'."
271
+ )
272
+ # error: Incompatible return value type (got "str", expected
273
+ # "Literal['forward', 'backward', 'both']")
274
+ return limit_direction # type: ignore[return-value]
275
+
276
+
277
+ def validate_limit_area(limit_area: str | None) -> Literal["inside", "outside"] | None:
278
+ if limit_area is not None:
279
+ valid_limit_areas = ["inside", "outside"]
280
+ limit_area = limit_area.lower()
281
+ if limit_area not in valid_limit_areas:
282
+ raise ValueError(
283
+ f"Invalid limit_area: expecting one of {valid_limit_areas}, got "
284
+ f"{limit_area}."
285
+ )
286
+ # error: Incompatible return value type (got "Optional[str]", expected
287
+ # "Optional[Literal['inside', 'outside']]")
288
+ return limit_area # type: ignore[return-value]
289
+
290
+
291
+ def infer_limit_direction(
292
+ limit_direction: Literal["backward", "forward", "both"] | None, method: str
293
+ ) -> Literal["backward", "forward", "both"]:
294
+ # Set `limit_direction` depending on `method`
295
+ if limit_direction is None:
296
+ if method in ("backfill", "bfill"):
297
+ limit_direction = "backward"
298
+ else:
299
+ limit_direction = "forward"
300
+ else:
301
+ if method in ("pad", "ffill") and limit_direction != "forward":
302
+ raise ValueError(
303
+ f"`limit_direction` must be 'forward' for method `{method}`"
304
+ )
305
+ if method in ("backfill", "bfill") and limit_direction != "backward":
306
+ raise ValueError(
307
+ f"`limit_direction` must be 'backward' for method `{method}`"
308
+ )
309
+ return limit_direction
310
+
311
+
312
+ def get_interp_index(method, index: Index) -> Index:
313
+ # create/use the index
314
+ if method == "linear":
315
+ # prior default
316
+ from pandas import Index
317
+
318
+ index = Index(np.arange(len(index)))
319
+ else:
320
+ methods = {"index", "values", "nearest", "time"}
321
+ is_numeric_or_datetime = (
322
+ is_numeric_dtype(index.dtype)
323
+ or isinstance(index.dtype, DatetimeTZDtype)
324
+ or lib.is_np_dtype(index.dtype, "mM")
325
+ )
326
+ if method not in methods and not is_numeric_or_datetime:
327
+ raise ValueError(
328
+ "Index column must be numeric or datetime type when "
329
+ f"using {method} method other than linear. "
330
+ "Try setting a numeric or datetime index column before "
331
+ "interpolating."
332
+ )
333
+
334
+ if isna(index).any():
335
+ raise NotImplementedError(
336
+ "Interpolation with NaNs in the index "
337
+ "has not been implemented. Try filling "
338
+ "those NaNs before interpolating."
339
+ )
340
+ return index
341
+
342
+
343
+ def interpolate_2d_inplace(
344
+ data: np.ndarray, # floating dtype
345
+ index: Index,
346
+ axis: AxisInt,
347
+ method: str = "linear",
348
+ limit: int | None = None,
349
+ limit_direction: str = "forward",
350
+ limit_area: str | None = None,
351
+ fill_value: Any | None = None,
352
+ mask=None,
353
+ **kwargs,
354
+ ) -> None:
355
+ """
356
+ Column-wise application of _interpolate_1d.
357
+
358
+ Notes
359
+ -----
360
+ Alters 'data' in-place.
361
+
362
+ The signature does differ from _interpolate_1d because it only
363
+ includes what is needed for Block.interpolate.
364
+ """
365
+ # validate the interp method
366
+ clean_interp_method(method, index, **kwargs)
367
+
368
+ if is_valid_na_for_dtype(fill_value, data.dtype):
369
+ fill_value = na_value_for_dtype(data.dtype, compat=False)
370
+
371
+ if method == "time":
372
+ if not needs_i8_conversion(index.dtype):
373
+ raise ValueError(
374
+ "time-weighted interpolation only works "
375
+ "on Series or DataFrames with a "
376
+ "DatetimeIndex"
377
+ )
378
+ method = "values"
379
+
380
+ limit_direction = validate_limit_direction(limit_direction)
381
+ limit_area_validated = validate_limit_area(limit_area)
382
+
383
+ # default limit is unlimited GH #16282
384
+ limit = algos.validate_limit(nobs=None, limit=limit)
385
+
386
+ indices = _index_to_interp_indices(index, method)
387
+
388
+ def func(yvalues: np.ndarray) -> None:
389
+ # process 1-d slices in the axis direction
390
+
391
+ _interpolate_1d(
392
+ indices=indices,
393
+ yvalues=yvalues,
394
+ method=method,
395
+ limit=limit,
396
+ limit_direction=limit_direction,
397
+ limit_area=limit_area_validated,
398
+ fill_value=fill_value,
399
+ bounds_error=False,
400
+ mask=mask,
401
+ **kwargs,
402
+ )
403
+
404
+ # error: Argument 1 to "apply_along_axis" has incompatible type
405
+ # "Callable[[ndarray[Any, Any]], None]"; expected "Callable[...,
406
+ # Union[_SupportsArray[dtype[<nothing>]], Sequence[_SupportsArray
407
+ # [dtype[<nothing>]]], Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
408
+ # Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
409
+ # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]]]]]"
410
+ np.apply_along_axis(func, axis, data) # type: ignore[arg-type]
411
+
412
+
413
+ def _index_to_interp_indices(index: Index, method: str) -> np.ndarray:
414
+ """
415
+ Convert Index to ndarray of indices to pass to NumPy/SciPy.
416
+ """
417
+ xarr = index._values
418
+ if needs_i8_conversion(xarr.dtype):
419
+ # GH#1646 for dt64tz
420
+ xarr = xarr.view("i8")
421
+
422
+ if method == "linear":
423
+ inds = xarr
424
+ inds = cast(np.ndarray, inds)
425
+ else:
426
+ inds = np.asarray(xarr)
427
+
428
+ if method in ("values", "index"):
429
+ if inds.dtype == np.object_:
430
+ inds = lib.maybe_convert_objects(inds)
431
+
432
+ return inds
433
+
434
+
435
+ def _interpolate_1d(
436
+ indices: np.ndarray,
437
+ yvalues: np.ndarray,
438
+ method: str = "linear",
439
+ limit: int | None = None,
440
+ limit_direction: str = "forward",
441
+ limit_area: Literal["inside", "outside"] | None = None,
442
+ fill_value: Any | None = None,
443
+ bounds_error: bool = False,
444
+ order: int | None = None,
445
+ mask=None,
446
+ **kwargs,
447
+ ) -> None:
448
+ """
449
+ Logic for the 1-d interpolation. The input
450
+ indices and yvalues will each be 1-d arrays of the same length.
451
+
452
+ Bounds_error is currently hardcoded to False since non-scipy ones don't
453
+ take it as an argument.
454
+
455
+ Notes
456
+ -----
457
+ Fills 'yvalues' in-place.
458
+ """
459
+ if mask is not None:
460
+ invalid = mask
461
+ else:
462
+ invalid = isna(yvalues)
463
+ valid = ~invalid
464
+
465
+ if not valid.any():
466
+ return
467
+
468
+ if valid.all():
469
+ return
470
+
471
+ # These are sets of index pointers to invalid values... i.e. {0, 1, etc...
472
+ all_nans = set(np.flatnonzero(invalid))
473
+
474
+ first_valid_index = find_valid_index(how="first", is_valid=valid)
475
+ if first_valid_index is None: # no nan found in start
476
+ first_valid_index = 0
477
+ start_nans = set(range(first_valid_index))
478
+
479
+ last_valid_index = find_valid_index(how="last", is_valid=valid)
480
+ if last_valid_index is None: # no nan found in end
481
+ last_valid_index = len(yvalues)
482
+ end_nans = set(range(1 + last_valid_index, len(valid)))
483
+
484
+ # Like the sets above, preserve_nans contains indices of invalid values,
485
+ # but in this case, it is the final set of indices that need to be
486
+ # preserved as NaN after the interpolation.
487
+
488
+ # For example if limit_direction='forward' then preserve_nans will
489
+ # contain indices of NaNs at the beginning of the series, and NaNs that
490
+ # are more than 'limit' away from the prior non-NaN.
491
+
492
+ # set preserve_nans based on direction using _interp_limit
493
+ preserve_nans: list | set
494
+ if limit_direction == "forward":
495
+ preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
496
+ elif limit_direction == "backward":
497
+ preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
498
+ else:
499
+ # both directions... just use _interp_limit
500
+ preserve_nans = set(_interp_limit(invalid, limit, limit))
501
+
502
+ # if limit_area is set, add either mid or outside indices
503
+ # to preserve_nans GH #16284
504
+ if limit_area == "inside":
505
+ # preserve NaNs on the outside
506
+ preserve_nans |= start_nans | end_nans
507
+ elif limit_area == "outside":
508
+ # preserve NaNs on the inside
509
+ mid_nans = all_nans - start_nans - end_nans
510
+ preserve_nans |= mid_nans
511
+
512
+ # sort preserve_nans and convert to list
513
+ preserve_nans = sorted(preserve_nans)
514
+
515
+ is_datetimelike = yvalues.dtype.kind in "mM"
516
+
517
+ if is_datetimelike:
518
+ yvalues = yvalues.view("i8")
519
+
520
+ if method in NP_METHODS:
521
+ # np.interp requires sorted X values, #21037
522
+
523
+ indexer = np.argsort(indices[valid])
524
+ yvalues[invalid] = np.interp(
525
+ indices[invalid], indices[valid][indexer], yvalues[valid][indexer]
526
+ )
527
+ else:
528
+ yvalues[invalid] = _interpolate_scipy_wrapper(
529
+ indices[valid],
530
+ yvalues[valid],
531
+ indices[invalid],
532
+ method=method,
533
+ fill_value=fill_value,
534
+ bounds_error=bounds_error,
535
+ order=order,
536
+ **kwargs,
537
+ )
538
+
539
+ if mask is not None:
540
+ mask[:] = False
541
+ mask[preserve_nans] = True
542
+ elif is_datetimelike:
543
+ yvalues[preserve_nans] = NaT.value
544
+ else:
545
+ yvalues[preserve_nans] = np.nan
546
+ return
547
+
548
+
549
+ def _interpolate_scipy_wrapper(
550
+ x: np.ndarray,
551
+ y: np.ndarray,
552
+ new_x: np.ndarray,
553
+ method: str,
554
+ fill_value=None,
555
+ bounds_error: bool = False,
556
+ order=None,
557
+ **kwargs,
558
+ ):
559
+ """
560
+ Passed off to scipy.interpolate.interp1d. method is scipy's kind.
561
+ Returns an array interpolated at new_x. Add any new methods to
562
+ the list in _clean_interp_method.
563
+ """
564
+ extra = f"{method} interpolation requires SciPy."
565
+ import_optional_dependency("scipy", extra=extra)
566
+ from scipy import interpolate
567
+
568
+ new_x = np.asarray(new_x)
569
+
570
+ # ignores some kwargs that could be passed along.
571
+ alt_methods = {
572
+ "barycentric": interpolate.barycentric_interpolate,
573
+ "krogh": interpolate.krogh_interpolate,
574
+ "from_derivatives": _from_derivatives,
575
+ "piecewise_polynomial": _from_derivatives,
576
+ "cubicspline": _cubicspline_interpolate,
577
+ "akima": _akima_interpolate,
578
+ "pchip": interpolate.pchip_interpolate,
579
+ }
580
+
581
+ interp1d_methods = [
582
+ "nearest",
583
+ "zero",
584
+ "slinear",
585
+ "quadratic",
586
+ "cubic",
587
+ "polynomial",
588
+ ]
589
+ if method in interp1d_methods:
590
+ if method == "polynomial":
591
+ kind = order
592
+ else:
593
+ kind = method
594
+ terp = interpolate.interp1d(
595
+ x, y, kind=kind, fill_value=fill_value, bounds_error=bounds_error
596
+ )
597
+ new_y = terp(new_x)
598
+ elif method == "spline":
599
+ # GH #10633, #24014
600
+ if isna(order) or (order <= 0):
601
+ raise ValueError(
602
+ f"order needs to be specified and greater than 0; got order: {order}"
603
+ )
604
+ terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
605
+ new_y = terp(new_x)
606
+ else:
607
+ # GH 7295: need to be able to write for some reason
608
+ # in some circumstances: check all three
609
+ if not x.flags.writeable:
610
+ x = x.copy()
611
+ if not y.flags.writeable:
612
+ y = y.copy()
613
+ if not new_x.flags.writeable:
614
+ new_x = new_x.copy()
615
+ terp = alt_methods[method]
616
+ new_y = terp(x, y, new_x, **kwargs)
617
+ return new_y
618
+
619
+
620
+ def _from_derivatives(
621
+ xi: np.ndarray,
622
+ yi: np.ndarray,
623
+ x: np.ndarray,
624
+ order=None,
625
+ der: int | list[int] | None = 0,
626
+ extrapolate: bool = False,
627
+ ):
628
+ """
629
+ Convenience function for interpolate.BPoly.from_derivatives.
630
+
631
+ Construct a piecewise polynomial in the Bernstein basis, compatible
632
+ with the specified values and derivatives at breakpoints.
633
+
634
+ Parameters
635
+ ----------
636
+ xi : array-like
637
+ sorted 1D array of x-coordinates
638
+ yi : array-like or list of array-likes
639
+ yi[i][j] is the j-th derivative known at xi[i]
640
+ order: None or int or array-like of ints. Default: None.
641
+ Specifies the degree of local polynomials. If not None, some
642
+ derivatives are ignored.
643
+ der : int or list
644
+ How many derivatives to extract; None for all potentially nonzero
645
+ derivatives (that is a number equal to the number of points), or a
646
+ list of derivatives to extract. This number includes the function
647
+ value as 0th derivative.
648
+ extrapolate : bool, optional
649
+ Whether to extrapolate to ouf-of-bounds points based on first and last
650
+ intervals, or to return NaNs. Default: True.
651
+
652
+ See Also
653
+ --------
654
+ scipy.interpolate.BPoly.from_derivatives
655
+
656
+ Returns
657
+ -------
658
+ y : scalar or array-like
659
+ The result, of length R or length M or M by R.
660
+ """
661
+ from scipy import interpolate
662
+
663
+ # return the method for compat with scipy version & backwards compat
664
+ method = interpolate.BPoly.from_derivatives
665
+ m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)
666
+
667
+ return m(x)
668
+
669
+
670
+ def _akima_interpolate(
671
+ xi: np.ndarray,
672
+ yi: np.ndarray,
673
+ x: np.ndarray,
674
+ der: int | list[int] | None = 0,
675
+ axis: AxisInt = 0,
676
+ ):
677
+ """
678
+ Convenience function for akima interpolation.
679
+ xi and yi are arrays of values used to approximate some function f,
680
+ with ``yi = f(xi)``.
681
+
682
+ See `Akima1DInterpolator` for details.
683
+
684
+ Parameters
685
+ ----------
686
+ xi : np.ndarray
687
+ A sorted list of x-coordinates, of length N.
688
+ yi : np.ndarray
689
+ A 1-D array of real values. `yi`'s length along the interpolation
690
+ axis must be equal to the length of `xi`. If N-D array, use axis
691
+ parameter to select correct axis.
692
+ x : np.ndarray
693
+ Of length M.
694
+ der : int, optional
695
+ How many derivatives to extract; None for all potentially
696
+ nonzero derivatives (that is a number equal to the number
697
+ of points), or a list of derivatives to extract. This number
698
+ includes the function value as 0th derivative.
699
+ axis : int, optional
700
+ Axis in the yi array corresponding to the x-coordinate values.
701
+
702
+ See Also
703
+ --------
704
+ scipy.interpolate.Akima1DInterpolator
705
+
706
+ Returns
707
+ -------
708
+ y : scalar or array-like
709
+ The result, of length R or length M or M by R,
710
+
711
+ """
712
+ from scipy import interpolate
713
+
714
+ P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
715
+
716
+ return P(x, nu=der)
717
+
718
+
719
+ def _cubicspline_interpolate(
720
+ xi: np.ndarray,
721
+ yi: np.ndarray,
722
+ x: np.ndarray,
723
+ axis: AxisInt = 0,
724
+ bc_type: str | tuple[Any, Any] = "not-a-knot",
725
+ extrapolate=None,
726
+ ):
727
+ """
728
+ Convenience function for cubic spline data interpolator.
729
+
730
+ See `scipy.interpolate.CubicSpline` for details.
731
+
732
+ Parameters
733
+ ----------
734
+ xi : np.ndarray, shape (n,)
735
+ 1-d array containing values of the independent variable.
736
+ Values must be real, finite and in strictly increasing order.
737
+ yi : np.ndarray
738
+ Array containing values of the dependent variable. It can have
739
+ arbitrary number of dimensions, but the length along ``axis``
740
+ (see below) must match the length of ``x``. Values must be finite.
741
+ x : np.ndarray, shape (m,)
742
+ axis : int, optional
743
+ Axis along which `y` is assumed to be varying. Meaning that for
744
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
745
+ Default is 0.
746
+ bc_type : string or 2-tuple, optional
747
+ Boundary condition type. Two additional equations, given by the
748
+ boundary conditions, are required to determine all coefficients of
749
+ polynomials on each segment [2]_.
750
+ If `bc_type` is a string, then the specified condition will be applied
751
+ at both ends of a spline. Available conditions are:
752
+ * 'not-a-knot' (default): The first and second segment at a curve end
753
+ are the same polynomial. It is a good default when there is no
754
+ information on boundary conditions.
755
+ * 'periodic': The interpolated function is assumed to be periodic
756
+ of period ``x[-1] - x[0]``. The first and last value of `y` must be
757
+ identical: ``y[0] == y[-1]``. This boundary condition will result in
758
+ ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
759
+ * 'clamped': The first derivative at curve ends is zero. Assuming
760
+ a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
761
+ * 'natural': The second derivative at curve ends is zero. Assuming
762
+ a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
763
+ If `bc_type` is a 2-tuple, the first and the second value will be
764
+ applied at the curve start and end respectively. The tuple values can
765
+ be one of the previously mentioned strings (except 'periodic') or a
766
+ tuple `(order, deriv_values)` allowing to specify arbitrary
767
+ derivatives at curve ends:
768
+ * `order`: the derivative order, 1 or 2.
769
+ * `deriv_value`: array-like containing derivative values, shape must
770
+ be the same as `y`, excluding ``axis`` dimension. For example, if
771
+ `y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with
772
+ the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
773
+ and have the shape (n0, n1).
774
+ extrapolate : {bool, 'periodic', None}, optional
775
+ If bool, determines whether to extrapolate to out-of-bounds points
776
+ based on first and last intervals, or to return NaNs. If 'periodic',
777
+ periodic extrapolation is used. If None (default), ``extrapolate`` is
778
+ set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
779
+
780
+ See Also
781
+ --------
782
+ scipy.interpolate.CubicHermiteSpline
783
+
784
+ Returns
785
+ -------
786
+ y : scalar or array-like
787
+ The result, of shape (m,)
788
+
789
+ References
790
+ ----------
791
+ .. [1] `Cubic Spline Interpolation
792
+ <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
793
+ on Wikiversity.
794
+ .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
795
+ """
796
+ from scipy import interpolate
797
+
798
+ P = interpolate.CubicSpline(
799
+ xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate
800
+ )
801
+
802
+ return P(x)
803
+
804
+
805
+ def _interpolate_with_limit_area(
806
+ values: np.ndarray,
807
+ method: Literal["pad", "backfill"],
808
+ limit: int | None,
809
+ limit_area: Literal["inside", "outside"],
810
+ ) -> None:
811
+ """
812
+ Apply interpolation and limit_area logic to values along a to-be-specified axis.
813
+
814
+ Parameters
815
+ ----------
816
+ values: np.ndarray
817
+ Input array.
818
+ method: str
819
+ Interpolation method. Could be "bfill" or "pad"
820
+ limit: int, optional
821
+ Index limit on interpolation.
822
+ limit_area: {'inside', 'outside'}
823
+ Limit area for interpolation.
824
+
825
+ Notes
826
+ -----
827
+ Modifies values in-place.
828
+ """
829
+
830
+ invalid = isna(values)
831
+ is_valid = ~invalid
832
+
833
+ if not invalid.all():
834
+ first = find_valid_index(how="first", is_valid=is_valid)
835
+ if first is None:
836
+ first = 0
837
+ last = find_valid_index(how="last", is_valid=is_valid)
838
+ if last is None:
839
+ last = len(values)
840
+
841
+ pad_or_backfill_inplace(
842
+ values,
843
+ method=method,
844
+ limit=limit,
845
+ limit_area=limit_area,
846
+ )
847
+
848
+ if limit_area == "inside":
849
+ invalid[first : last + 1] = False
850
+ elif limit_area == "outside":
851
+ invalid[:first] = invalid[last + 1 :] = False
852
+ else:
853
+ raise ValueError("limit_area should be 'inside' or 'outside'")
854
+
855
+ values[invalid] = np.nan
856
+
857
+
858
+ def pad_or_backfill_inplace(
859
+ values: np.ndarray,
860
+ method: Literal["pad", "backfill"] = "pad",
861
+ axis: AxisInt = 0,
862
+ limit: int | None = None,
863
+ limit_area: Literal["inside", "outside"] | None = None,
864
+ ) -> None:
865
+ """
866
+ Perform an actual interpolation of values, values will be make 2-d if
867
+ needed fills inplace, returns the result.
868
+
869
+ Parameters
870
+ ----------
871
+ values: np.ndarray
872
+ Input array.
873
+ method: str, default "pad"
874
+ Interpolation method. Could be "bfill" or "pad"
875
+ axis: 0 or 1
876
+ Interpolation axis
877
+ limit: int, optional
878
+ Index limit on interpolation.
879
+ limit_area: str, optional
880
+ Limit area for interpolation. Can be "inside" or "outside"
881
+
882
+ Notes
883
+ -----
884
+ Modifies values in-place.
885
+ """
886
+ transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
887
+
888
+ # reshape a 1 dim if needed
889
+ if values.ndim == 1:
890
+ if axis != 0: # pragma: no cover
891
+ raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
892
+ values = values.reshape(tuple((1,) + values.shape))
893
+
894
+ method = clean_fill_method(method)
895
+ tvalues = transf(values)
896
+
897
+ func = get_fill_func(method, ndim=2)
898
+ # _pad_2d and _backfill_2d both modify tvalues inplace
899
+ func(tvalues, limit=limit, limit_area=limit_area)
900
+
901
+
902
+ def _fillna_prep(
903
+ values, mask: npt.NDArray[np.bool_] | None = None
904
+ ) -> npt.NDArray[np.bool_]:
905
+ # boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d
906
+
907
+ if mask is None:
908
+ mask = isna(values)
909
+
910
+ return mask
911
+
912
+
913
+ def _datetimelike_compat(func: F) -> F:
914
+ """
915
+ Wrapper to handle datetime64 and timedelta64 dtypes.
916
+ """
917
+
918
+ @wraps(func)
919
+ def new_func(
920
+ values,
921
+ limit: int | None = None,
922
+ limit_area: Literal["inside", "outside"] | None = None,
923
+ mask=None,
924
+ ):
925
+ if needs_i8_conversion(values.dtype):
926
+ if mask is None:
927
+ # This needs to occur before casting to int64
928
+ mask = isna(values)
929
+
930
+ result, mask = func(
931
+ values.view("i8"), limit=limit, limit_area=limit_area, mask=mask
932
+ )
933
+ return result.view(values.dtype), mask
934
+
935
+ return func(values, limit=limit, limit_area=limit_area, mask=mask)
936
+
937
+ return cast(F, new_func)
938
+
939
+
940
+ @_datetimelike_compat
941
+ def _pad_1d(
942
+ values: np.ndarray,
943
+ limit: int | None = None,
944
+ limit_area: Literal["inside", "outside"] | None = None,
945
+ mask: npt.NDArray[np.bool_] | None = None,
946
+ ) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
947
+ mask = _fillna_prep(values, mask)
948
+ if limit_area is not None and not mask.all():
949
+ _fill_limit_area_1d(mask, limit_area)
950
+ algos.pad_inplace(values, mask, limit=limit)
951
+ return values, mask
952
+
953
+
954
+ @_datetimelike_compat
955
+ def _backfill_1d(
956
+ values: np.ndarray,
957
+ limit: int | None = None,
958
+ limit_area: Literal["inside", "outside"] | None = None,
959
+ mask: npt.NDArray[np.bool_] | None = None,
960
+ ) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
961
+ mask = _fillna_prep(values, mask)
962
+ if limit_area is not None and not mask.all():
963
+ _fill_limit_area_1d(mask, limit_area)
964
+ algos.backfill_inplace(values, mask, limit=limit)
965
+ return values, mask
966
+
967
+
968
+ @_datetimelike_compat
969
+ def _pad_2d(
970
+ values: np.ndarray,
971
+ limit: int | None = None,
972
+ limit_area: Literal["inside", "outside"] | None = None,
973
+ mask: npt.NDArray[np.bool_] | None = None,
974
+ ):
975
+ mask = _fillna_prep(values, mask)
976
+ if limit_area is not None:
977
+ _fill_limit_area_2d(mask, limit_area)
978
+
979
+ if values.size:
980
+ algos.pad_2d_inplace(values, mask, limit=limit)
981
+ else:
982
+ # for test coverage
983
+ pass
984
+ return values, mask
985
+
986
+
987
+ @_datetimelike_compat
988
+ def _backfill_2d(
989
+ values,
990
+ limit: int | None = None,
991
+ limit_area: Literal["inside", "outside"] | None = None,
992
+ mask: npt.NDArray[np.bool_] | None = None,
993
+ ):
994
+ mask = _fillna_prep(values, mask)
995
+ if limit_area is not None:
996
+ _fill_limit_area_2d(mask, limit_area)
997
+
998
+ if values.size:
999
+ algos.backfill_2d_inplace(values, mask, limit=limit)
1000
+ else:
1001
+ # for test coverage
1002
+ pass
1003
+ return values, mask
1004
+
1005
+
1006
+ def _fill_limit_area_1d(
1007
+ mask: npt.NDArray[np.bool_], limit_area: Literal["outside", "inside"]
1008
+ ) -> None:
1009
+ """Prepare 1d mask for ffill/bfill with limit_area.
1010
+
1011
+ Caller is responsible for checking at least one value of mask is False.
1012
+ When called, mask will no longer faithfully represent when
1013
+ the corresponding values are NA or not.
1014
+
1015
+ Parameters
1016
+ ----------
1017
+ mask : np.ndarray[bool, ndim=1]
1018
+ Mask representing NA values when filling.
1019
+ limit_area : { "outside", "inside" }
1020
+ Whether to limit filling to outside or inside the outermost non-NA value.
1021
+ """
1022
+ neg_mask = ~mask
1023
+ first = neg_mask.argmax()
1024
+ last = len(neg_mask) - neg_mask[::-1].argmax() - 1
1025
+ if limit_area == "inside":
1026
+ mask[:first] = False
1027
+ mask[last + 1 :] = False
1028
+ elif limit_area == "outside":
1029
+ mask[first + 1 : last] = False
1030
+
1031
+
1032
+ def _fill_limit_area_2d(
1033
+ mask: npt.NDArray[np.bool_], limit_area: Literal["outside", "inside"]
1034
+ ) -> None:
1035
+ """Prepare 2d mask for ffill/bfill with limit_area.
1036
+
1037
+ When called, mask will no longer faithfully represent when
1038
+ the corresponding values are NA or not.
1039
+
1040
+ Parameters
1041
+ ----------
1042
+ mask : np.ndarray[bool, ndim=2]
1043
+ Mask representing NA values when filling.
1044
+ limit_area : { "outside", "inside" }
1045
+ Whether to limit filling to outside or inside the outermost non-NA value.
1046
+ """
1047
+ neg_mask = ~mask.T
1048
+ if limit_area == "outside":
1049
+ # Identify inside
1050
+ la_mask = (
1051
+ np.maximum.accumulate(neg_mask, axis=0)
1052
+ & np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1]
1053
+ )
1054
+ else:
1055
+ # Identify outside
1056
+ la_mask = (
1057
+ ~np.maximum.accumulate(neg_mask, axis=0)
1058
+ | ~np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1]
1059
+ )
1060
+ mask[la_mask.T] = False
1061
+
1062
+
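A minimal usage sketch of the limit_area semantics prepared by the two mask helpers above, seen through the public Series.ffill API (available in recent pandas versions); the values are illustrative only.

import numpy as np
import pandas as pd

s = pd.Series([np.nan, 1.0, np.nan, 3.0, np.nan])
# "inside" only fills gaps bounded by valid values on both sides.
print(s.ffill(limit_area="inside").tolist())   # [nan, 1.0, 1.0, 3.0, nan]
# "outside" only fills the leading/trailing runs of NaN; forward fill can
# only reach the trailing run here.
print(s.ffill(limit_area="outside").tolist())  # [nan, 1.0, nan, 3.0, 3.0]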
1063
+ _fill_methods = {"pad": _pad_1d, "backfill": _backfill_1d}
1064
+
1065
+
1066
+ def get_fill_func(method, ndim: int = 1):
1067
+ method = clean_fill_method(method)
1068
+ if ndim == 1:
1069
+ return _fill_methods[method]
1070
+ return {"pad": _pad_2d, "backfill": _backfill_2d}[method]
1071
+
1072
+
1073
+ def clean_reindex_fill_method(method) -> ReindexMethod | None:
1074
+ if method is None:
1075
+ return None
1076
+ return clean_fill_method(method, allow_nearest=True)
1077
+
1078
+
1079
+ def _interp_limit(
1080
+ invalid: npt.NDArray[np.bool_], fw_limit: int | None, bw_limit: int | None
1081
+ ):
1082
+ """
1083
+ Get indexers of values that won't be filled
1084
+ because they exceed the limits.
1085
+
1086
+ Parameters
1087
+ ----------
1088
+ invalid : np.ndarray[bool]
1089
+ fw_limit : int or None
1090
+ forward limit to index
1091
+ bw_limit : int or None
1092
+ backward limit to index
1093
+
1094
+ Returns
1095
+ -------
1096
+ set of indexers
1097
+
1098
+ Notes
1099
+ -----
1100
+ This is equivalent to the more readable, but slower
1101
+
1102
+ .. code-block:: python
1103
+
1104
+ def _interp_limit(invalid, fw_limit, bw_limit):
1105
+ for x in np.where(invalid)[0]:
1106
+ if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
1107
+ yield x
1108
+ """
1109
+ # handle forward first; the backward direction is the same except
1110
+ # 1. operate on the reversed array
1111
+ # 2. subtract the returned indices from N - 1
1112
+ N = len(invalid)
1113
+ f_idx = set()
1114
+ b_idx = set()
1115
+
1116
+ def inner(invalid, limit: int):
1117
+ limit = min(limit, N)
1118
+ windowed = _rolling_window(invalid, limit + 1).all(1)
1119
+ idx = set(np.where(windowed)[0] + limit) | set(
1120
+ np.where((~invalid[: limit + 1]).cumsum() == 0)[0]
1121
+ )
1122
+ return idx
1123
+
1124
+ if fw_limit is not None:
1125
+ if fw_limit == 0:
1126
+ f_idx = set(np.where(invalid)[0])
1127
+ else:
1128
+ f_idx = inner(invalid, fw_limit)
1129
+
1130
+ if bw_limit is not None:
1131
+ if bw_limit == 0:
1132
+ # then we don't even need to care about backwards
1133
+ # just use forwards
1134
+ return f_idx
1135
+ else:
1136
+ b_idx_inv = list(inner(invalid[::-1], bw_limit))
1137
+ b_idx = set(N - 1 - np.asarray(b_idx_inv))
1138
+ if fw_limit == 0:
1139
+ return b_idx
1140
+
1141
+ return f_idx & b_idx
1142
+
1143
+
1144
+ def _rolling_window(a: npt.NDArray[np.bool_], window: int) -> npt.NDArray[np.bool_]:
1145
+ """
1146
+ [True, True, False, True, False], 2 ->
1147
+
1148
+ [
1149
+ [True, True],
1150
+ [True, False],
1151
+ [False, True],
1152
+ [True, False],
1153
+ ]
1154
+ """
1155
+ # https://stackoverflow.com/a/6811241
1156
+ shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
1157
+ strides = a.strides + (a.strides[-1],)
1158
+ return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
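A minimal sketch of the stride trick used by _rolling_window and of how _interp_limit consumes it; the boolean flags below are made-up NaN indicators, not data from the code above.

import numpy as np

invalid = np.array([True, True, False, True, False])  # hypothetical NaN flags
window = 2
shape = invalid.shape[:-1] + (invalid.shape[-1] - window + 1, window)
strides = invalid.strides + (invalid.strides[-1],)
windowed = np.lib.stride_tricks.as_strided(invalid, shape=shape, strides=strides)
# Each row is a length-2 window over `invalid`, exactly as the docstring shows.
print(windowed.all(1))  # [ True False False False]
# _interp_limit shifts these window hits by `limit` to collect the indices of
# NaNs that lie beyond the allowed fill limit.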
venv/lib/python3.10/site-packages/pandas/core/nanops.py ADDED
@@ -0,0 +1,1748 @@
1
+ from __future__ import annotations
2
+
3
+ import functools
4
+ import itertools
5
+ from typing import (
6
+ Any,
7
+ Callable,
8
+ cast,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._config import get_option
15
+
16
+ from pandas._libs import (
17
+ NaT,
18
+ NaTType,
19
+ iNaT,
20
+ lib,
21
+ )
22
+ from pandas._typing import (
23
+ ArrayLike,
24
+ AxisInt,
25
+ CorrelationMethod,
26
+ Dtype,
27
+ DtypeObj,
28
+ F,
29
+ Scalar,
30
+ Shape,
31
+ npt,
32
+ )
33
+ from pandas.compat._optional import import_optional_dependency
34
+ from pandas.util._exceptions import find_stack_level
35
+
36
+ from pandas.core.dtypes.common import (
37
+ is_complex,
38
+ is_float,
39
+ is_float_dtype,
40
+ is_integer,
41
+ is_numeric_dtype,
42
+ is_object_dtype,
43
+ needs_i8_conversion,
44
+ pandas_dtype,
45
+ )
46
+ from pandas.core.dtypes.missing import (
47
+ isna,
48
+ na_value_for_dtype,
49
+ notna,
50
+ )
51
+
52
+ bn = import_optional_dependency("bottleneck", errors="warn")
53
+ _BOTTLENECK_INSTALLED = bn is not None
54
+ _USE_BOTTLENECK = False
55
+
56
+
57
+ def set_use_bottleneck(v: bool = True) -> None:
58
+ # set/unset to use bottleneck
59
+ global _USE_BOTTLENECK
60
+ if _BOTTLENECK_INSTALLED:
61
+ _USE_BOTTLENECK = v
62
+
63
+
64
+ set_use_bottleneck(get_option("compute.use_bottleneck"))
65
+
66
+
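A minimal sketch of flipping the option that drives the module-level switch above, assuming only the standard pandas option API; when bottleneck is not installed, set_use_bottleneck() simply leaves the flag False.

import pandas as pd

pd.set_option("compute.use_bottleneck", False)  # force the pure-NumPy reductions
pd.set_option("compute.use_bottleneck", True)   # allow bottleneck fast paths again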
67
+ class disallow:
68
+ def __init__(self, *dtypes: Dtype) -> None:
69
+ super().__init__()
70
+ self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes)
71
+
72
+ def check(self, obj) -> bool:
73
+ return hasattr(obj, "dtype") and issubclass(obj.dtype.type, self.dtypes)
74
+
75
+ def __call__(self, f: F) -> F:
76
+ @functools.wraps(f)
77
+ def _f(*args, **kwargs):
78
+ obj_iter = itertools.chain(args, kwargs.values())
79
+ if any(self.check(obj) for obj in obj_iter):
80
+ f_name = f.__name__.replace("nan", "")
81
+ raise TypeError(
82
+ f"reduction operation '{f_name}' not allowed for this dtype"
83
+ )
84
+ try:
85
+ return f(*args, **kwargs)
86
+ except ValueError as e:
87
+ # we want to transform an object array
88
+ # ValueError message to the more typical TypeError
89
+ # e.g. this is normally a disallowed function on
90
+ # object arrays that contain strings
91
+ if is_object_dtype(args[0]):
92
+ raise TypeError(e) from e
93
+ raise
94
+
95
+ return cast(F, _f)
96
+
97
+
98
+ class bottleneck_switch:
99
+ def __init__(self, name=None, **kwargs) -> None:
100
+ self.name = name
101
+ self.kwargs = kwargs
102
+
103
+ def __call__(self, alt: F) -> F:
104
+ bn_name = self.name or alt.__name__
105
+
106
+ try:
107
+ bn_func = getattr(bn, bn_name)
108
+ except (AttributeError, NameError): # pragma: no cover
109
+ bn_func = None
110
+
111
+ @functools.wraps(alt)
112
+ def f(
113
+ values: np.ndarray,
114
+ *,
115
+ axis: AxisInt | None = None,
116
+ skipna: bool = True,
117
+ **kwds,
118
+ ):
119
+ if len(self.kwargs) > 0:
120
+ for k, v in self.kwargs.items():
121
+ if k not in kwds:
122
+ kwds[k] = v
123
+
124
+ if values.size == 0 and kwds.get("min_count") is None:
125
+ # We are empty, returning NA for our type
126
+ # Only applies for the default `min_count` of None
127
+ # since that affects how empty arrays are handled.
128
+ # TODO(GH-18976) update all the nanops methods to
129
+ # correctly handle empty inputs and remove this check.
130
+ # It *may* just be `var`
131
+ return _na_for_min_count(values, axis)
132
+
133
+ if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
134
+ if kwds.get("mask", None) is None:
135
+ # `mask` is not recognised by bottleneck, would raise
136
+ # TypeError if called
137
+ kwds.pop("mask", None)
138
+ result = bn_func(values, axis=axis, **kwds)
139
+
140
+ # prefer to treat inf/-inf as NA, but must compute the func
141
+ # twice :(
142
+ if _has_infs(result):
143
+ result = alt(values, axis=axis, skipna=skipna, **kwds)
144
+ else:
145
+ result = alt(values, axis=axis, skipna=skipna, **kwds)
146
+ else:
147
+ result = alt(values, axis=axis, skipna=skipna, **kwds)
148
+
149
+ return result
150
+
151
+ return cast(F, f)
152
+
153
+
154
+ def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool:
155
+ # Bottleneck chokes on datetime64, PeriodDtype (or any EA)
156
+ if dtype != object and not needs_i8_conversion(dtype):
157
+ # GH 42878
158
+ # Bottleneck uses naive summation leading to O(n) loss of precision
159
+ # unlike numpy which implements pairwise summation, which has O(log(n)) loss
160
+ # crossref: https://github.com/pydata/bottleneck/issues/379
161
+
162
+ # GH 15507
163
+ # bottleneck does not properly upcast during the sum
164
+ # so can overflow
165
+
166
+ # GH 9422
167
+ # further we also want to preserve NaN when all elements
168
+ # are NaN, unlike bottleneck/numpy which consider this
169
+ # to be 0
170
+ return name not in ["nansum", "nanprod", "nanmean"]
171
+ return False
172
+
173
+
174
+ def _has_infs(result) -> bool:
175
+ if isinstance(result, np.ndarray):
176
+ if result.dtype in ("f8", "f4"):
177
+ # Note: outside of a nanops-specific test, we always have
178
+ # result.ndim == 1, so there is no risk of this ravel making a copy.
179
+ return lib.has_infs(result.ravel("K"))
180
+ try:
181
+ return np.isinf(result).any()
182
+ except (TypeError, NotImplementedError):
183
+ # if it doesn't support infs, then it can't have infs
184
+ return False
185
+
186
+
187
+ def _get_fill_value(
188
+ dtype: DtypeObj, fill_value: Scalar | None = None, fill_value_typ=None
189
+ ):
190
+ """return the correct fill value for the dtype of the values"""
191
+ if fill_value is not None:
192
+ return fill_value
193
+ if _na_ok_dtype(dtype):
194
+ if fill_value_typ is None:
195
+ return np.nan
196
+ else:
197
+ if fill_value_typ == "+inf":
198
+ return np.inf
199
+ else:
200
+ return -np.inf
201
+ else:
202
+ if fill_value_typ == "+inf":
203
+ # need the max int here
204
+ return lib.i8max
205
+ else:
206
+ return iNaT
207
+
208
+
209
+ def _maybe_get_mask(
210
+ values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None
211
+ ) -> npt.NDArray[np.bool_] | None:
212
+ """
213
+ Compute a mask if and only if necessary.
214
+
215
+ This function will compute a mask iff it is necessary. Otherwise,
216
+ return the provided mask (potentially None) when a mask does not need to be
217
+ computed.
218
+
219
+ A mask is never necessary if the values array is of boolean or integer
220
+ dtypes, as these are incapable of storing NaNs. If passing a NaN-capable
221
+ dtype that is interpretable as either boolean or integer data (eg,
222
+ timedelta64), a mask must be provided.
223
+
224
+ If the skipna parameter is False, a new mask will not be computed.
225
+
226
+ When a mask does need to be computed, it is computed from the values
227
+ using isna().
228
+
229
+ Parameters
230
+ ----------
231
+ values : ndarray
232
+ input array to potentially compute mask for
233
+ skipna : bool
234
+ boolean for whether NaNs should be skipped
235
+ mask : Optional[ndarray]
236
+ nan-mask if known
237
+
238
+ Returns
239
+ -------
240
+ Optional[np.ndarray[bool]]
241
+ """
242
+ if mask is None:
243
+ if values.dtype.kind in "biu":
244
+ # Boolean data cannot contain nulls, so signal via mask being None
245
+ return None
246
+
247
+ if skipna or values.dtype.kind in "mM":
248
+ mask = isna(values)
249
+
250
+ return mask
251
+
252
+
253
+ def _get_values(
254
+ values: np.ndarray,
255
+ skipna: bool,
256
+ fill_value: Any = None,
257
+ fill_value_typ: str | None = None,
258
+ mask: npt.NDArray[np.bool_] | None = None,
259
+ ) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None]:
260
+ """
261
+ Utility to get the values view, mask, dtype, dtype_max, and fill_value.
262
+
263
+ If both mask and fill_value/fill_value_typ are not None and skipna is True,
264
+ the values array will be copied.
265
+
266
+ For input arrays of boolean or integer dtypes, copies will only occur if a
267
+ precomputed mask, a fill_value/fill_value_typ, and skipna=True are
268
+ provided.
269
+
270
+ Parameters
271
+ ----------
272
+ values : ndarray
273
+ input array to potentially compute mask for
274
+ skipna : bool
275
+ boolean for whether NaNs should be skipped
276
+ fill_value : Any
277
+ value to fill NaNs with
278
+ fill_value_typ : str
279
+ Set to '+inf' or '-inf' to handle dtype-specific infinities
280
+ mask : Optional[np.ndarray[bool]]
281
+ nan-mask if known
282
+
283
+ Returns
284
+ -------
285
+ values : ndarray
286
+ Potential copy of input value array
287
+ mask : Optional[ndarray[bool]]
288
+ Mask for values, if deemed necessary to compute
289
+ """
290
+ # _get_values is only called from within nanops, and in all cases
291
+ # with scalar fill_value. This guarantee is important for the
292
+ # np.where call below
293
+
294
+ mask = _maybe_get_mask(values, skipna, mask)
295
+
296
+ dtype = values.dtype
297
+
298
+ datetimelike = False
299
+ if values.dtype.kind in "mM":
300
+ # changing timedelta64/datetime64 to int64 needs to happen after
301
+ # finding `mask` above
302
+ values = np.asarray(values.view("i8"))
303
+ datetimelike = True
304
+
305
+ if skipna and (mask is not None):
306
+ # get our fill value (in case we need to provide an alternative
307
+ # dtype for it)
308
+ fill_value = _get_fill_value(
309
+ dtype, fill_value=fill_value, fill_value_typ=fill_value_typ
310
+ )
311
+
312
+ if fill_value is not None:
313
+ if mask.any():
314
+ if datetimelike or _na_ok_dtype(dtype):
315
+ values = values.copy()
316
+ np.putmask(values, mask, fill_value)
317
+ else:
318
+ # np.where will promote if needed
319
+ values = np.where(~mask, values, fill_value)
320
+
321
+ return values, mask
322
+
323
+
324
+ def _get_dtype_max(dtype: np.dtype) -> np.dtype:
325
+ # return a platform independent precision dtype
326
+ dtype_max = dtype
327
+ if dtype.kind in "bi":
328
+ dtype_max = np.dtype(np.int64)
329
+ elif dtype.kind == "u":
330
+ dtype_max = np.dtype(np.uint64)
331
+ elif dtype.kind == "f":
332
+ dtype_max = np.dtype(np.float64)
333
+ return dtype_max
334
+
335
+
336
+ def _na_ok_dtype(dtype: DtypeObj) -> bool:
337
+ if needs_i8_conversion(dtype):
338
+ return False
339
+ return not issubclass(dtype.type, np.integer)
340
+
341
+
342
+ def _wrap_results(result, dtype: np.dtype, fill_value=None):
343
+ """wrap our results if needed"""
344
+ if result is NaT:
345
+ pass
346
+
347
+ elif dtype.kind == "M":
348
+ if fill_value is None:
349
+ # GH#24293
350
+ fill_value = iNaT
351
+ if not isinstance(result, np.ndarray):
352
+ assert not isna(fill_value), "Expected non-null fill_value"
353
+ if result == fill_value:
354
+ result = np.nan
355
+
356
+ if isna(result):
357
+ result = np.datetime64("NaT", "ns").astype(dtype)
358
+ else:
359
+ result = np.int64(result).view(dtype)
360
+ # retain original unit
361
+ result = result.astype(dtype, copy=False)
362
+ else:
363
+ # If we have float dtype, taking a view will give the wrong result
364
+ result = result.astype(dtype)
365
+ elif dtype.kind == "m":
366
+ if not isinstance(result, np.ndarray):
367
+ if result == fill_value or np.isnan(result):
368
+ result = np.timedelta64("NaT").astype(dtype)
369
+
370
+ elif np.fabs(result) > lib.i8max:
371
+ # raise if we have a timedelta64[ns] which is too large
372
+ raise ValueError("overflow in timedelta operation")
373
+ else:
374
+ # return a timedelta64 with the original unit
375
+ result = np.int64(result).astype(dtype, copy=False)
376
+
377
+ else:
378
+ result = result.astype("m8[ns]").view(dtype)
379
+
380
+ return result
381
+
382
+
383
+ def _datetimelike_compat(func: F) -> F:
384
+ """
385
+ If we have datetime64 or timedelta64 values, ensure we have a correct
386
+ mask before calling the wrapped function, then cast back afterwards.
387
+ """
388
+
389
+ @functools.wraps(func)
390
+ def new_func(
391
+ values: np.ndarray,
392
+ *,
393
+ axis: AxisInt | None = None,
394
+ skipna: bool = True,
395
+ mask: npt.NDArray[np.bool_] | None = None,
396
+ **kwargs,
397
+ ):
398
+ orig_values = values
399
+
400
+ datetimelike = values.dtype.kind in "mM"
401
+ if datetimelike and mask is None:
402
+ mask = isna(values)
403
+
404
+ result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)
405
+
406
+ if datetimelike:
407
+ result = _wrap_results(result, orig_values.dtype, fill_value=iNaT)
408
+ if not skipna:
409
+ assert mask is not None # checked above
410
+ result = _mask_datetimelike_result(result, axis, mask, orig_values)
411
+
412
+ return result
413
+
414
+ return cast(F, new_func)
415
+
416
+
417
+ def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray:
418
+ """
419
+ Return the missing value for `values`.
420
+
421
+ Parameters
422
+ ----------
423
+ values : ndarray
424
+ axis : int or None
425
+ axis for the reduction, required if values.ndim > 1.
426
+
427
+ Returns
428
+ -------
429
+ result : scalar or ndarray
430
+ For 1-D values, returns a scalar of the correct missing type.
431
+ For 2-D values, returns a 1-D array where each element is missing.
432
+ """
433
+ # we either return np.nan or pd.NaT
434
+ if values.dtype.kind in "iufcb":
435
+ values = values.astype("float64")
436
+ fill_value = na_value_for_dtype(values.dtype)
437
+
438
+ if values.ndim == 1:
439
+ return fill_value
440
+ elif axis is None:
441
+ return fill_value
442
+ else:
443
+ result_shape = values.shape[:axis] + values.shape[axis + 1 :]
444
+
445
+ return np.full(result_shape, fill_value, dtype=values.dtype)
446
+
447
+
448
+ def maybe_operate_rowwise(func: F) -> F:
449
+ """
450
+ NumPy operations on C-contiguous ndarrays with axis=1 can be
451
+ very slow if axis 1 >> axis 0.
452
+ Operate row-by-row and concatenate the results.
453
+ """
454
+
455
+ @functools.wraps(func)
456
+ def newfunc(values: np.ndarray, *, axis: AxisInt | None = None, **kwargs):
457
+ if (
458
+ axis == 1
459
+ and values.ndim == 2
460
+ and values.flags["C_CONTIGUOUS"]
461
+ # only takes this path for wide arrays (long dataframes), for threshold see
462
+ # https://github.com/pandas-dev/pandas/pull/43311#issuecomment-974891737
463
+ and (values.shape[1] / 1000) > values.shape[0]
464
+ and values.dtype != object
465
+ and values.dtype != bool
466
+ ):
467
+ arrs = list(values)
468
+ if kwargs.get("mask") is not None:
469
+ mask = kwargs.pop("mask")
470
+ results = [
471
+ func(arrs[i], mask=mask[i], **kwargs) for i in range(len(arrs))
472
+ ]
473
+ else:
474
+ results = [func(x, **kwargs) for x in arrs]
475
+ return np.array(results)
476
+
477
+ return func(values, axis=axis, **kwargs)
478
+
479
+ return cast(F, newfunc)
480
+
481
+
482
+ def nanany(
483
+ values: np.ndarray,
484
+ *,
485
+ axis: AxisInt | None = None,
486
+ skipna: bool = True,
487
+ mask: npt.NDArray[np.bool_] | None = None,
488
+ ) -> bool:
489
+ """
490
+ Check if any elements along an axis evaluate to True.
491
+
492
+ Parameters
493
+ ----------
494
+ values : ndarray
495
+ axis : int, optional
496
+ skipna : bool, default True
497
+ mask : ndarray[bool], optional
498
+ nan-mask if known
499
+
500
+ Returns
501
+ -------
502
+ result : bool
503
+
504
+ Examples
505
+ --------
506
+ >>> from pandas.core import nanops
507
+ >>> s = pd.Series([1, 2])
508
+ >>> nanops.nanany(s.values)
509
+ True
510
+
511
+ >>> from pandas.core import nanops
512
+ >>> s = pd.Series([np.nan])
513
+ >>> nanops.nanany(s.values)
514
+ False
515
+ """
516
+ if values.dtype.kind in "iub" and mask is None:
517
+ # GH#26032 fastpath
518
+ # error: Incompatible return value type (got "Union[bool_, ndarray]",
519
+ # expected "bool")
520
+ return values.any(axis) # type: ignore[return-value]
521
+
522
+ if values.dtype.kind == "M":
523
+ # GH#34479
524
+ warnings.warn(
525
+ "'any' with datetime64 dtypes is deprecated and will raise in a "
526
+ "future version. Use (obj != pd.Timestamp(0)).any() instead.",
527
+ FutureWarning,
528
+ stacklevel=find_stack_level(),
529
+ )
530
+
531
+ values, _ = _get_values(values, skipna, fill_value=False, mask=mask)
532
+
533
+ # For object type, any won't necessarily return
534
+ # boolean values (numpy/numpy#4352)
535
+ if values.dtype == object:
536
+ values = values.astype(bool)
537
+
538
+ # error: Incompatible return value type (got "Union[bool_, ndarray]", expected
539
+ # "bool")
540
+ return values.any(axis) # type: ignore[return-value]
541
+
542
+
543
+ def nanall(
544
+ values: np.ndarray,
545
+ *,
546
+ axis: AxisInt | None = None,
547
+ skipna: bool = True,
548
+ mask: npt.NDArray[np.bool_] | None = None,
549
+ ) -> bool:
550
+ """
551
+ Check if all elements along an axis evaluate to True.
552
+
553
+ Parameters
554
+ ----------
555
+ values : ndarray
556
+ axis : int, optional
557
+ skipna : bool, default True
558
+ mask : ndarray[bool], optional
559
+ nan-mask if known
560
+
561
+ Returns
562
+ -------
563
+ result : bool
564
+
565
+ Examples
566
+ --------
567
+ >>> from pandas.core import nanops
568
+ >>> s = pd.Series([1, 2, np.nan])
569
+ >>> nanops.nanall(s.values)
570
+ True
571
+
572
+ >>> from pandas.core import nanops
573
+ >>> s = pd.Series([1, 0])
574
+ >>> nanops.nanall(s.values)
575
+ False
576
+ """
577
+ if values.dtype.kind in "iub" and mask is None:
578
+ # GH#26032 fastpath
579
+ # error: Incompatible return value type (got "Union[bool_, ndarray]",
580
+ # expected "bool")
581
+ return values.all(axis) # type: ignore[return-value]
582
+
583
+ if values.dtype.kind == "M":
584
+ # GH#34479
585
+ warnings.warn(
586
+ "'all' with datetime64 dtypes is deprecated and will raise in a "
587
+ "future version. Use (obj != pd.Timestamp(0)).all() instead.",
588
+ FutureWarning,
589
+ stacklevel=find_stack_level(),
590
+ )
591
+
592
+ values, _ = _get_values(values, skipna, fill_value=True, mask=mask)
593
+
594
+ # For object type, all won't necessarily return
595
+ # boolean values (numpy/numpy#4352)
596
+ if values.dtype == object:
597
+ values = values.astype(bool)
598
+
599
+ # error: Incompatible return value type (got "Union[bool_, ndarray]", expected
600
+ # "bool")
601
+ return values.all(axis) # type: ignore[return-value]
602
+
603
+
604
+ @disallow("M8")
605
+ @_datetimelike_compat
606
+ @maybe_operate_rowwise
607
+ def nansum(
608
+ values: np.ndarray,
609
+ *,
610
+ axis: AxisInt | None = None,
611
+ skipna: bool = True,
612
+ min_count: int = 0,
613
+ mask: npt.NDArray[np.bool_] | None = None,
614
+ ) -> float:
615
+ """
616
+ Sum the elements along an axis ignoring NaNs
617
+
618
+ Parameters
619
+ ----------
620
+ values : ndarray[dtype]
621
+ axis : int, optional
622
+ skipna : bool, default True
623
+ min_count: int, default 0
624
+ mask : ndarray[bool], optional
625
+ nan-mask if known
626
+
627
+ Returns
628
+ -------
629
+ result : dtype
630
+
631
+ Examples
632
+ --------
633
+ >>> from pandas.core import nanops
634
+ >>> s = pd.Series([1, 2, np.nan])
635
+ >>> nanops.nansum(s.values)
636
+ 3.0
637
+ """
638
+ dtype = values.dtype
639
+ values, mask = _get_values(values, skipna, fill_value=0, mask=mask)
640
+ dtype_sum = _get_dtype_max(dtype)
641
+ if dtype.kind == "f":
642
+ dtype_sum = dtype
643
+ elif dtype.kind == "m":
644
+ dtype_sum = np.dtype(np.float64)
645
+
646
+ the_sum = values.sum(axis, dtype=dtype_sum)
647
+ the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)
648
+
649
+ return the_sum
650
+
651
+
652
+ def _mask_datetimelike_result(
653
+ result: np.ndarray | np.datetime64 | np.timedelta64,
654
+ axis: AxisInt | None,
655
+ mask: npt.NDArray[np.bool_],
656
+ orig_values: np.ndarray,
657
+ ) -> np.ndarray | np.datetime64 | np.timedelta64 | NaTType:
658
+ if isinstance(result, np.ndarray):
659
+ # we need to apply the mask
660
+ result = result.astype("i8").view(orig_values.dtype)
661
+ axis_mask = mask.any(axis=axis)
662
+ # error: Unsupported target for indexed assignment ("Union[ndarray[Any, Any],
663
+ # datetime64, timedelta64]")
664
+ result[axis_mask] = iNaT # type: ignore[index]
665
+ else:
666
+ if mask.any():
667
+ return np.int64(iNaT).view(orig_values.dtype)
668
+ return result
669
+
670
+
671
+ @bottleneck_switch()
672
+ @_datetimelike_compat
673
+ def nanmean(
674
+ values: np.ndarray,
675
+ *,
676
+ axis: AxisInt | None = None,
677
+ skipna: bool = True,
678
+ mask: npt.NDArray[np.bool_] | None = None,
679
+ ) -> float:
680
+ """
681
+ Compute the mean of the elements along an axis ignoring NaNs
682
+
683
+ Parameters
684
+ ----------
685
+ values : ndarray
686
+ axis : int, optional
687
+ skipna : bool, default True
688
+ mask : ndarray[bool], optional
689
+ nan-mask if known
690
+
691
+ Returns
692
+ -------
693
+ float
694
+ Unless input is a float array, in which case use the same
695
+ precision as the input array.
696
+
697
+ Examples
698
+ --------
699
+ >>> from pandas.core import nanops
700
+ >>> s = pd.Series([1, 2, np.nan])
701
+ >>> nanops.nanmean(s.values)
702
+ 1.5
703
+ """
704
+ dtype = values.dtype
705
+ values, mask = _get_values(values, skipna, fill_value=0, mask=mask)
706
+ dtype_sum = _get_dtype_max(dtype)
707
+ dtype_count = np.dtype(np.float64)
708
+
709
+ # not using needs_i8_conversion because that includes period
710
+ if dtype.kind in "mM":
711
+ dtype_sum = np.dtype(np.float64)
712
+ elif dtype.kind in "iu":
713
+ dtype_sum = np.dtype(np.float64)
714
+ elif dtype.kind == "f":
715
+ dtype_sum = dtype
716
+ dtype_count = dtype
717
+
718
+ count = _get_counts(values.shape, mask, axis, dtype=dtype_count)
719
+ the_sum = values.sum(axis, dtype=dtype_sum)
720
+ the_sum = _ensure_numeric(the_sum)
721
+
722
+ if axis is not None and getattr(the_sum, "ndim", False):
723
+ count = cast(np.ndarray, count)
724
+ with np.errstate(all="ignore"):
725
+ # suppress division by zero warnings
726
+ the_mean = the_sum / count
727
+ ct_mask = count == 0
728
+ if ct_mask.any():
729
+ the_mean[ct_mask] = np.nan
730
+ else:
731
+ the_mean = the_sum / count if count > 0 else np.nan
732
+
733
+ return the_mean
734
+
735
+
736
+ @bottleneck_switch()
737
+ def nanmedian(values, *, axis: AxisInt | None = None, skipna: bool = True, mask=None):
738
+ """
739
+ Parameters
740
+ ----------
741
+ values : ndarray
742
+ axis : int, optional
743
+ skipna : bool, default True
744
+ mask : ndarray[bool], optional
745
+ nan-mask if known
746
+
747
+ Returns
748
+ -------
749
+ result : float
750
+ Unless input is a float array, in which case use the same
751
+ precision as the input array.
752
+
753
+ Examples
754
+ --------
755
+ >>> from pandas.core import nanops
756
+ >>> s = pd.Series([1, np.nan, 2, 2])
757
+ >>> nanops.nanmedian(s.values)
758
+ 2.0
759
+ """
760
+ # for floats without mask, the data already uses NaN as missing value
761
+ # indicator, and `mask` will be calculated from that below -> in those
762
+ # cases we never need to set NaN to the masked values
763
+ using_nan_sentinel = values.dtype.kind == "f" and mask is None
764
+
765
+ def get_median(x, _mask=None):
766
+ if _mask is None:
767
+ _mask = notna(x)
768
+ else:
769
+ _mask = ~_mask
770
+ if not skipna and not _mask.all():
771
+ return np.nan
772
+ with warnings.catch_warnings():
773
+ # Suppress RuntimeWarning about All-NaN slice
774
+ warnings.filterwarnings(
775
+ "ignore", "All-NaN slice encountered", RuntimeWarning
776
+ )
777
+ res = np.nanmedian(x[_mask])
778
+ return res
779
+
780
+ dtype = values.dtype
781
+ values, mask = _get_values(values, skipna, mask=mask, fill_value=None)
782
+ if values.dtype.kind != "f":
783
+ if values.dtype == object:
784
+ # GH#34671 avoid casting strings to numeric
785
+ inferred = lib.infer_dtype(values)
786
+ if inferred in ["string", "mixed"]:
787
+ raise TypeError(f"Cannot convert {values} to numeric")
788
+ try:
789
+ values = values.astype("f8")
790
+ except ValueError as err:
791
+ # e.g. "could not convert string to float: 'a'"
792
+ raise TypeError(str(err)) from err
793
+ if not using_nan_sentinel and mask is not None:
794
+ if not values.flags.writeable:
795
+ values = values.copy()
796
+ values[mask] = np.nan
797
+
798
+ notempty = values.size
799
+
800
+ # an array from a frame
801
+ if values.ndim > 1 and axis is not None:
802
+ # there's a non-empty array to apply over otherwise numpy raises
803
+ if notempty:
804
+ if not skipna:
805
+ res = np.apply_along_axis(get_median, axis, values)
806
+
807
+ else:
808
+ # fastpath for the skipna case
809
+ with warnings.catch_warnings():
810
+ # Suppress RuntimeWarning about All-NaN slice
811
+ warnings.filterwarnings(
812
+ "ignore", "All-NaN slice encountered", RuntimeWarning
813
+ )
814
+ if (values.shape[1] == 1 and axis == 0) or (
815
+ values.shape[0] == 1 and axis == 1
816
+ ):
817
+ # GH52788: fastpath when squeezable, nanmedian for 2D array slow
818
+ res = np.nanmedian(np.squeeze(values), keepdims=True)
819
+ else:
820
+ res = np.nanmedian(values, axis=axis)
821
+
822
+ else:
823
+ # must return the correct shape, but median is not defined for the
824
+ # empty set so return nans of shape "everything but the passed axis"
825
+ # since "axis" is where the reduction would occur if we had a nonempty
826
+ # array
827
+ res = _get_empty_reduction_result(values.shape, axis)
828
+
829
+ else:
830
+ # otherwise return a scalar value
831
+ res = get_median(values, mask) if notempty else np.nan
832
+ return _wrap_results(res, dtype)
833
+
834
+
835
+ def _get_empty_reduction_result(
836
+ shape: Shape,
837
+ axis: AxisInt,
838
+ ) -> np.ndarray:
839
+ """
840
+ The result from a reduction on an empty ndarray.
841
+
842
+ Parameters
843
+ ----------
844
+ shape : Tuple[int, ...]
845
+ axis : int
846
+
847
+ Returns
848
+ -------
849
+ np.ndarray
850
+ """
851
+ shp = np.array(shape)
852
+ dims = np.arange(len(shape))
853
+ ret = np.empty(shp[dims != axis], dtype=np.float64)
854
+ ret.fill(np.nan)
855
+ return ret
856
+
857
+
858
+ def _get_counts_nanvar(
859
+ values_shape: Shape,
860
+ mask: npt.NDArray[np.bool_] | None,
861
+ axis: AxisInt | None,
862
+ ddof: int,
863
+ dtype: np.dtype = np.dtype(np.float64),
864
+ ) -> tuple[float | np.ndarray, float | np.ndarray]:
865
+ """
866
+ Get the count of non-null values along an axis, accounting
867
+ for degrees of freedom.
868
+
869
+ Parameters
870
+ ----------
871
+ values_shape : Tuple[int, ...]
872
+ shape tuple from values ndarray, used if mask is None
873
+ mask : Optional[ndarray[bool]]
874
+ locations in values that should be considered missing
875
+ axis : Optional[int]
876
+ axis to count along
877
+ ddof : int
878
+ degrees of freedom
879
+ dtype : type, optional
880
+ type to use for count
881
+
882
+ Returns
883
+ -------
884
+ count : int, np.nan or np.ndarray
885
+ d : int, np.nan or np.ndarray
886
+ """
887
+ count = _get_counts(values_shape, mask, axis, dtype=dtype)
888
+ d = count - dtype.type(ddof)
889
+
890
+ # always return NaN, never inf
891
+ if is_float(count):
892
+ if count <= ddof:
893
+ # error: Incompatible types in assignment (expression has type
894
+ # "float", variable has type "Union[floating[Any], ndarray[Any,
895
+ # dtype[floating[Any]]]]")
896
+ count = np.nan # type: ignore[assignment]
897
+ d = np.nan
898
+ else:
899
+ # count is not narrowed by is_float check
900
+ count = cast(np.ndarray, count)
901
+ mask = count <= ddof
902
+ if mask.any():
903
+ np.putmask(d, mask, np.nan)
904
+ np.putmask(count, mask, np.nan)
905
+ return count, d
906
+
907
+
908
+ @bottleneck_switch(ddof=1)
909
+ def nanstd(
910
+ values,
911
+ *,
912
+ axis: AxisInt | None = None,
913
+ skipna: bool = True,
914
+ ddof: int = 1,
915
+ mask=None,
916
+ ):
917
+ """
918
+ Compute the standard deviation along given axis while ignoring NaNs
919
+
920
+ Parameters
921
+ ----------
922
+ values : ndarray
923
+ axis : int, optional
924
+ skipna : bool, default True
925
+ ddof : int, default 1
926
+ Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
927
+ where N represents the number of elements.
928
+ mask : ndarray[bool], optional
929
+ nan-mask if known
930
+
931
+ Returns
932
+ -------
933
+ result : float
934
+ Unless input is a float array, in which case use the same
935
+ precision as the input array.
936
+
937
+ Examples
938
+ --------
939
+ >>> from pandas.core import nanops
940
+ >>> s = pd.Series([1, np.nan, 2, 3])
941
+ >>> nanops.nanstd(s.values)
942
+ 1.0
943
+ """
944
+ if values.dtype == "M8[ns]":
945
+ values = values.view("m8[ns]")
946
+
947
+ orig_dtype = values.dtype
948
+ values, mask = _get_values(values, skipna, mask=mask)
949
+
950
+ result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask))
951
+ return _wrap_results(result, orig_dtype)
952
+
953
+
954
+ @disallow("M8", "m8")
955
+ @bottleneck_switch(ddof=1)
956
+ def nanvar(
957
+ values: np.ndarray,
958
+ *,
959
+ axis: AxisInt | None = None,
960
+ skipna: bool = True,
961
+ ddof: int = 1,
962
+ mask=None,
963
+ ):
964
+ """
965
+ Compute the variance along given axis while ignoring NaNs
966
+
967
+ Parameters
968
+ ----------
969
+ values : ndarray
970
+ axis : int, optional
971
+ skipna : bool, default True
972
+ ddof : int, default 1
973
+ Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
974
+ where N represents the number of elements.
975
+ mask : ndarray[bool], optional
976
+ nan-mask if known
977
+
978
+ Returns
979
+ -------
980
+ result : float
981
+ Unless input is a float array, in which case use the same
982
+ precision as the input array.
983
+
984
+ Examples
985
+ --------
986
+ >>> from pandas.core import nanops
987
+ >>> s = pd.Series([1, np.nan, 2, 3])
988
+ >>> nanops.nanvar(s.values)
989
+ 1.0
990
+ """
991
+ dtype = values.dtype
992
+ mask = _maybe_get_mask(values, skipna, mask)
993
+ if dtype.kind in "iu":
994
+ values = values.astype("f8")
995
+ if mask is not None:
996
+ values[mask] = np.nan
997
+
998
+ if values.dtype.kind == "f":
999
+ count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
1000
+ else:
1001
+ count, d = _get_counts_nanvar(values.shape, mask, axis, ddof)
1002
+
1003
+ if skipna and mask is not None:
1004
+ values = values.copy()
1005
+ np.putmask(values, mask, 0)
1006
+
1007
+ # xref GH10242
1008
+ # Compute variance via two-pass algorithm, which is stable against
1009
+ # cancellation errors and relatively accurate for small numbers of
1010
+ # observations.
1011
+ #
1012
+ # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
1013
+ avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
1014
+ if axis is not None:
1015
+ avg = np.expand_dims(avg, axis)
1016
+ sqr = _ensure_numeric((avg - values) ** 2)
1017
+ if mask is not None:
1018
+ np.putmask(sqr, mask, 0)
1019
+ result = sqr.sum(axis=axis, dtype=np.float64) / d
1020
+
1021
+ # Return variance as np.float64 (the datatype used in the accumulator),
1022
+ # unless we were dealing with a float array, in which case use the same
1023
+ # precision as the original values array.
1024
+ if dtype.kind == "f":
1025
+ result = result.astype(dtype, copy=False)
1026
+ return result
1027
+
1028
+
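A minimal numeric check of the two-pass variance scheme described in the GH10242 comment above; the array is illustrative only.

import numpy as np

x = np.array([1.0, 2.0, 4.0, 8.0])
avg = x.sum(dtype=np.float64) / len(x)                       # first pass: the mean
var = ((avg - x) ** 2).sum(dtype=np.float64) / (len(x) - 1)  # second pass, ddof=1
print(np.isclose(var, np.var(x, ddof=1)))                    # True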
1029
+ @disallow("M8", "m8")
1030
+ def nansem(
1031
+ values: np.ndarray,
1032
+ *,
1033
+ axis: AxisInt | None = None,
1034
+ skipna: bool = True,
1035
+ ddof: int = 1,
1036
+ mask: npt.NDArray[np.bool_] | None = None,
1037
+ ) -> float:
1038
+ """
1039
+ Compute the standard error in the mean along given axis while ignoring NaNs
1040
+
1041
+ Parameters
1042
+ ----------
1043
+ values : ndarray
1044
+ axis : int, optional
1045
+ skipna : bool, default True
1046
+ ddof : int, default 1
1047
+ Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
1048
+ where N represents the number of elements.
1049
+ mask : ndarray[bool], optional
1050
+ nan-mask if known
1051
+
1052
+ Returns
1053
+ -------
1054
+ result : float64
1055
+ Unless input is a float array, in which case use the same
1056
+ precision as the input array.
1057
+
1058
+ Examples
1059
+ --------
1060
+ >>> from pandas.core import nanops
1061
+ >>> s = pd.Series([1, np.nan, 2, 3])
1062
+ >>> nanops.nansem(s.values)
1063
+ 0.5773502691896258
1064
+ """
1065
+ # This checks if non-numeric-like data is passed with numeric_only=False
1066
+ # and raises a TypeError otherwise
1067
+ nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
1068
+
1069
+ mask = _maybe_get_mask(values, skipna, mask)
1070
+ if values.dtype.kind != "f":
1071
+ values = values.astype("f8")
1072
+
1073
+ if not skipna and mask is not None and mask.any():
1074
+ return np.nan
1075
+
1076
+ count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
1077
+ var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
1078
+
1079
+ return np.sqrt(var) / np.sqrt(count)
1080
+
1081
+
1082
+ def _nanminmax(meth, fill_value_typ):
1083
+ @bottleneck_switch(name=f"nan{meth}")
1084
+ @_datetimelike_compat
1085
+ def reduction(
1086
+ values: np.ndarray,
1087
+ *,
1088
+ axis: AxisInt | None = None,
1089
+ skipna: bool = True,
1090
+ mask: npt.NDArray[np.bool_] | None = None,
1091
+ ):
1092
+ if values.size == 0:
1093
+ return _na_for_min_count(values, axis)
1094
+
1095
+ values, mask = _get_values(
1096
+ values, skipna, fill_value_typ=fill_value_typ, mask=mask
1097
+ )
1098
+ result = getattr(values, meth)(axis)
1099
+ result = _maybe_null_out(result, axis, mask, values.shape)
1100
+ return result
1101
+
1102
+ return reduction
1103
+
1104
+
1105
+ nanmin = _nanminmax("min", fill_value_typ="+inf")
1106
+ nanmax = _nanminmax("max", fill_value_typ="-inf")
1107
+
1108
+
1109
+ def nanargmax(
1110
+ values: np.ndarray,
1111
+ *,
1112
+ axis: AxisInt | None = None,
1113
+ skipna: bool = True,
1114
+ mask: npt.NDArray[np.bool_] | None = None,
1115
+ ) -> int | np.ndarray:
1116
+ """
1117
+ Parameters
1118
+ ----------
1119
+ values : ndarray
1120
+ axis : int, optional
1121
+ skipna : bool, default True
1122
+ mask : ndarray[bool], optional
1123
+ nan-mask if known
1124
+
1125
+ Returns
1126
+ -------
1127
+ result : int or ndarray[int]
1128
+ The index/indices of max value in specified axis or -1 in the NA case
1129
+
1130
+ Examples
1131
+ --------
1132
+ >>> from pandas.core import nanops
1133
+ >>> arr = np.array([1, 2, 3, np.nan, 4])
1134
+ >>> nanops.nanargmax(arr)
1135
+ 4
1136
+
1137
+ >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
1138
+ >>> arr[2:, 2] = np.nan
1139
+ >>> arr
1140
+ array([[ 0., 1., 2.],
1141
+ [ 3., 4., 5.],
1142
+ [ 6., 7., nan],
1143
+ [ 9., 10., nan]])
1144
+ >>> nanops.nanargmax(arr, axis=1)
1145
+ array([2, 2, 1, 1])
1146
+ """
1147
+ values, mask = _get_values(values, True, fill_value_typ="-inf", mask=mask)
1148
+ result = values.argmax(axis)
1149
+ # error: Argument 1 to "_maybe_arg_null_out" has incompatible type "Any |
1150
+ # signedinteger[Any]"; expected "ndarray[Any, Any]"
1151
+ result = _maybe_arg_null_out(result, axis, mask, skipna) # type: ignore[arg-type]
1152
+ return result
1153
+
1154
+
1155
+ def nanargmin(
1156
+ values: np.ndarray,
1157
+ *,
1158
+ axis: AxisInt | None = None,
1159
+ skipna: bool = True,
1160
+ mask: npt.NDArray[np.bool_] | None = None,
1161
+ ) -> int | np.ndarray:
1162
+ """
1163
+ Parameters
1164
+ ----------
1165
+ values : ndarray
1166
+ axis : int, optional
1167
+ skipna : bool, default True
1168
+ mask : ndarray[bool], optional
1169
+ nan-mask if known
1170
+
1171
+ Returns
1172
+ -------
1173
+ result : int or ndarray[int]
1174
+ The index/indices of min value in specified axis or -1 in the NA case
1175
+
1176
+ Examples
1177
+ --------
1178
+ >>> from pandas.core import nanops
1179
+ >>> arr = np.array([1, 2, 3, np.nan, 4])
1180
+ >>> nanops.nanargmin(arr)
1181
+ 0
1182
+
1183
+ >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
1184
+ >>> arr[2:, 0] = np.nan
1185
+ >>> arr
1186
+ array([[ 0., 1., 2.],
1187
+ [ 3., 4., 5.],
1188
+ [nan, 7., 8.],
1189
+ [nan, 10., 11.]])
1190
+ >>> nanops.nanargmin(arr, axis=1)
1191
+ array([0, 0, 1, 1])
1192
+ """
1193
+ values, mask = _get_values(values, True, fill_value_typ="+inf", mask=mask)
1194
+ result = values.argmin(axis)
1195
+ # error: Argument 1 to "_maybe_arg_null_out" has incompatible type "Any |
1196
+ # signedinteger[Any]"; expected "ndarray[Any, Any]"
1197
+ result = _maybe_arg_null_out(result, axis, mask, skipna) # type: ignore[arg-type]
1198
+ return result
1199
+
1200
+
1201
+ @disallow("M8", "m8")
1202
+ @maybe_operate_rowwise
1203
+ def nanskew(
1204
+ values: np.ndarray,
1205
+ *,
1206
+ axis: AxisInt | None = None,
1207
+ skipna: bool = True,
1208
+ mask: npt.NDArray[np.bool_] | None = None,
1209
+ ) -> float:
1210
+ """
1211
+ Compute the sample skewness.
1212
+
1213
+ The statistic computed here is the adjusted Fisher-Pearson standardized
1214
+ moment coefficient G1. The algorithm computes this coefficient directly
1215
+ from the second and third central moment.
1216
+
1217
+ Parameters
1218
+ ----------
1219
+ values : ndarray
1220
+ axis : int, optional
1221
+ skipna : bool, default True
1222
+ mask : ndarray[bool], optional
1223
+ nan-mask if known
1224
+
1225
+ Returns
1226
+ -------
1227
+ result : float64
1228
+ Unless input is a float array, in which case use the same
1229
+ precision as the input array.
1230
+
1231
+ Examples
1232
+ --------
1233
+ >>> from pandas.core import nanops
1234
+ >>> s = pd.Series([1, np.nan, 1, 2])
1235
+ >>> nanops.nanskew(s.values)
1236
+ 1.7320508075688787
1237
+ """
1238
+ mask = _maybe_get_mask(values, skipna, mask)
1239
+ if values.dtype.kind != "f":
1240
+ values = values.astype("f8")
1241
+ count = _get_counts(values.shape, mask, axis)
1242
+ else:
1243
+ count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
1244
+
1245
+ if skipna and mask is not None:
1246
+ values = values.copy()
1247
+ np.putmask(values, mask, 0)
1248
+ elif not skipna and mask is not None and mask.any():
1249
+ return np.nan
1250
+
1251
+ with np.errstate(invalid="ignore", divide="ignore"):
1252
+ mean = values.sum(axis, dtype=np.float64) / count
1253
+ if axis is not None:
1254
+ mean = np.expand_dims(mean, axis)
1255
+
1256
+ adjusted = values - mean
1257
+ if skipna and mask is not None:
1258
+ np.putmask(adjusted, mask, 0)
1259
+ adjusted2 = adjusted**2
1260
+ adjusted3 = adjusted2 * adjusted
1261
+ m2 = adjusted2.sum(axis, dtype=np.float64)
1262
+ m3 = adjusted3.sum(axis, dtype=np.float64)
1263
+
1264
+ # floating point error
1265
+ #
1266
+ # #18044 in _libs/windows.pyx calc_skew follow this behavior
1267
+ # to fix the fperr to treat m2 <1e-14 as zero
1268
+ m2 = _zero_out_fperr(m2)
1269
+ m3 = _zero_out_fperr(m3)
1270
+
1271
+ with np.errstate(invalid="ignore", divide="ignore"):
1272
+ result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2**1.5)
1273
+
1274
+ dtype = values.dtype
1275
+ if dtype.kind == "f":
1276
+ result = result.astype(dtype, copy=False)
1277
+
1278
+ if isinstance(result, np.ndarray):
1279
+ result = np.where(m2 == 0, 0, result)
1280
+ result[count < 3] = np.nan
1281
+ else:
1282
+ result = dtype.type(0) if m2 == 0 else result
1283
+ if count < 3:
1284
+ return np.nan
1285
+
1286
+ return result
1287
+
1288
+
1289
+ @disallow("M8", "m8")
1290
+ @maybe_operate_rowwise
1291
+ def nankurt(
1292
+ values: np.ndarray,
1293
+ *,
1294
+ axis: AxisInt | None = None,
1295
+ skipna: bool = True,
1296
+ mask: npt.NDArray[np.bool_] | None = None,
1297
+ ) -> float:
1298
+ """
1299
+ Compute the sample excess kurtosis
1300
+
1301
+ The statistic computed here is the adjusted Fisher-Pearson standardized
1302
+ moment coefficient G2, computed directly from the second and fourth
1303
+ central moment.
1304
+
1305
+ Parameters
1306
+ ----------
1307
+ values : ndarray
1308
+ axis : int, optional
1309
+ skipna : bool, default True
1310
+ mask : ndarray[bool], optional
1311
+ nan-mask if known
1312
+
1313
+ Returns
1314
+ -------
1315
+ result : float64
1316
+ Unless input is a float array, in which case use the same
1317
+ precision as the input array.
1318
+
1319
+ Examples
1320
+ --------
1321
+ >>> from pandas.core import nanops
1322
+ >>> s = pd.Series([1, np.nan, 1, 3, 2])
1323
+ >>> nanops.nankurt(s.values)
1324
+ -1.2892561983471076
1325
+ """
1326
+ mask = _maybe_get_mask(values, skipna, mask)
1327
+ if values.dtype.kind != "f":
1328
+ values = values.astype("f8")
1329
+ count = _get_counts(values.shape, mask, axis)
1330
+ else:
1331
+ count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
1332
+
1333
+ if skipna and mask is not None:
1334
+ values = values.copy()
1335
+ np.putmask(values, mask, 0)
1336
+ elif not skipna and mask is not None and mask.any():
1337
+ return np.nan
1338
+
1339
+ with np.errstate(invalid="ignore", divide="ignore"):
1340
+ mean = values.sum(axis, dtype=np.float64) / count
1341
+ if axis is not None:
1342
+ mean = np.expand_dims(mean, axis)
1343
+
1344
+ adjusted = values - mean
1345
+ if skipna and mask is not None:
1346
+ np.putmask(adjusted, mask, 0)
1347
+ adjusted2 = adjusted**2
1348
+ adjusted4 = adjusted2**2
1349
+ m2 = adjusted2.sum(axis, dtype=np.float64)
1350
+ m4 = adjusted4.sum(axis, dtype=np.float64)
1351
+
1352
+ with np.errstate(invalid="ignore", divide="ignore"):
1353
+ adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
1354
+ numerator = count * (count + 1) * (count - 1) * m4
1355
+ denominator = (count - 2) * (count - 3) * m2**2
1356
+
1357
+ # floating point error
1358
+ #
1359
+ # #18044 in _libs/windows.pyx calc_kurt follow this behavior
1360
+ # to fix the fperr to treat denom <1e-14 as zero
1361
+ numerator = _zero_out_fperr(numerator)
1362
+ denominator = _zero_out_fperr(denominator)
1363
+
1364
+ if not isinstance(denominator, np.ndarray):
1365
+ # if ``denom`` is a scalar, check these corner cases first before
1366
+ # doing division
1367
+ if count < 4:
1368
+ return np.nan
1369
+ if denominator == 0:
1370
+ return values.dtype.type(0)
1371
+
1372
+ with np.errstate(invalid="ignore", divide="ignore"):
1373
+ result = numerator / denominator - adj
1374
+
1375
+ dtype = values.dtype
1376
+ if dtype.kind == "f":
1377
+ result = result.astype(dtype, copy=False)
1378
+
1379
+ if isinstance(result, np.ndarray):
1380
+ result = np.where(denominator == 0, 0, result)
1381
+ result[count < 4] = np.nan
1382
+
1383
+ return result
1384
+
1385
+
1386
+ @disallow("M8", "m8")
1387
+ @maybe_operate_rowwise
1388
+ def nanprod(
1389
+ values: np.ndarray,
1390
+ *,
1391
+ axis: AxisInt | None = None,
1392
+ skipna: bool = True,
1393
+ min_count: int = 0,
1394
+ mask: npt.NDArray[np.bool_] | None = None,
1395
+ ) -> float:
1396
+ """
1397
+ Parameters
1398
+ ----------
1399
+ values : ndarray[dtype]
1400
+ axis : int, optional
1401
+ skipna : bool, default True
1402
+ min_count: int, default 0
1403
+ mask : ndarray[bool], optional
1404
+ nan-mask if known
1405
+
1406
+ Returns
1407
+ -------
1408
+ Dtype
1409
+ The product of all elements on a given axis. (NaNs are treated as 1)
1410
+
1411
+ Examples
1412
+ --------
1413
+ >>> from pandas.core import nanops
1414
+ >>> s = pd.Series([1, 2, 3, np.nan])
1415
+ >>> nanops.nanprod(s.values)
1416
+ 6.0
1417
+ """
1418
+ mask = _maybe_get_mask(values, skipna, mask)
1419
+
1420
+ if skipna and mask is not None:
1421
+ values = values.copy()
1422
+ values[mask] = 1
1423
+ result = values.prod(axis)
1424
+ # error: Incompatible return value type (got "Union[ndarray, float]", expected
1425
+ # "float")
1426
+ return _maybe_null_out( # type: ignore[return-value]
1427
+ result, axis, mask, values.shape, min_count=min_count
1428
+ )
1429
+
1430
+
1431
+ def _maybe_arg_null_out(
1432
+ result: np.ndarray,
1433
+ axis: AxisInt | None,
1434
+ mask: npt.NDArray[np.bool_] | None,
1435
+ skipna: bool,
1436
+ ) -> np.ndarray | int:
1437
+ # helper function for nanargmin/nanargmax
1438
+ if mask is None:
1439
+ return result
1440
+
1441
+ if axis is None or not getattr(result, "ndim", False):
1442
+ if skipna:
1443
+ if mask.all():
1444
+ return -1
1445
+ else:
1446
+ if mask.any():
1447
+ return -1
1448
+ else:
1449
+ if skipna:
1450
+ na_mask = mask.all(axis)
1451
+ else:
1452
+ na_mask = mask.any(axis)
1453
+ if na_mask.any():
1454
+ result[na_mask] = -1
1455
+ return result
1456
+
1457
+
1458
+ def _get_counts(
1459
+ values_shape: Shape,
1460
+ mask: npt.NDArray[np.bool_] | None,
1461
+ axis: AxisInt | None,
1462
+ dtype: np.dtype[np.floating] = np.dtype(np.float64),
1463
+ ) -> np.floating | npt.NDArray[np.floating]:
1464
+ """
1465
+ Get the count of non-null values along an axis
1466
+
1467
+ Parameters
1468
+ ----------
1469
+ values_shape : tuple of int
1470
+ shape tuple from values ndarray, used if mask is None
1471
+ mask : Optional[ndarray[bool]]
1472
+ locations in values that should be considered missing
1473
+ axis : Optional[int]
1474
+ axis to count along
1475
+ dtype : type, optional
1476
+ type to use for count
1477
+
1478
+ Returns
1479
+ -------
1480
+ count : scalar or array
1481
+ """
1482
+ if axis is None:
1483
+ if mask is not None:
1484
+ n = mask.size - mask.sum()
1485
+ else:
1486
+ n = np.prod(values_shape)
1487
+ return dtype.type(n)
1488
+
1489
+ if mask is not None:
1490
+ count = mask.shape[axis] - mask.sum(axis)
1491
+ else:
1492
+ count = values_shape[axis]
1493
+
1494
+ if is_integer(count):
1495
+ return dtype.type(count)
1496
+ return count.astype(dtype, copy=False)
1497
+
1498
+
1499
+ def _maybe_null_out(
1500
+ result: np.ndarray | float | NaTType,
1501
+ axis: AxisInt | None,
1502
+ mask: npt.NDArray[np.bool_] | None,
1503
+ shape: tuple[int, ...],
1504
+ min_count: int = 1,
1505
+ ) -> np.ndarray | float | NaTType:
1506
+ """
1507
+ Returns
1508
+ -------
1509
+ np.ndarray, float, or NaTType
1510
+ `result` with the appropriate missing value filled in wherever fewer than `min_count` non-null values contributed.
1511
+ """
1512
+ if mask is None and min_count == 0:
1513
+ # nothing to check; short-circuit
1514
+ return result
1515
+
1516
+ if axis is not None and isinstance(result, np.ndarray):
1517
+ if mask is not None:
1518
+ null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
1519
+ else:
1520
+ # we have no nulls, kept mask=None in _maybe_get_mask
1521
+ below_count = shape[axis] - min_count < 0
1522
+ new_shape = shape[:axis] + shape[axis + 1 :]
1523
+ null_mask = np.broadcast_to(below_count, new_shape)
1524
+
1525
+ if np.any(null_mask):
1526
+ if is_numeric_dtype(result):
1527
+ if np.iscomplexobj(result):
1528
+ result = result.astype("c16")
1529
+ elif not is_float_dtype(result):
1530
+ result = result.astype("f8", copy=False)
1531
+ result[null_mask] = np.nan
1532
+ else:
1533
+ # GH12941, use None to auto cast null
1534
+ result[null_mask] = None
1535
+ elif result is not NaT:
1536
+ if check_below_min_count(shape, mask, min_count):
1537
+ result_dtype = getattr(result, "dtype", None)
1538
+ if is_float_dtype(result_dtype):
1539
+ # error: Item "None" of "Optional[Any]" has no attribute "type"
1540
+ result = result_dtype.type("nan") # type: ignore[union-attr]
1541
+ else:
1542
+ result = np.nan
1543
+
1544
+ return result
1545
+
1546
+
1547
+ def check_below_min_count(
1548
+ shape: tuple[int, ...], mask: npt.NDArray[np.bool_] | None, min_count: int
1549
+ ) -> bool:
1550
+ """
1551
+ Check for the `min_count` keyword. Returns True if below `min_count` (when
1552
+ missing value should be returned from the reduction).
1553
+
1554
+ Parameters
1555
+ ----------
1556
+ shape : tuple
1557
+ The shape of the values (`values.shape`).
1558
+ mask : ndarray[bool] or None
1559
+ Boolean numpy array (typically of same shape as `shape`) or None.
1560
+ min_count : int
1561
+ Keyword passed through from sum/prod call.
1562
+
1563
+ Returns
1564
+ -------
1565
+ bool
1566
+ """
1567
+ if min_count > 0:
1568
+ if mask is None:
1569
+ # no missing values, only check size
1570
+ non_nulls = np.prod(shape)
1571
+ else:
1572
+ non_nulls = mask.size - mask.sum()
1573
+ if non_nulls < min_count:
1574
+ return True
1575
+ return False
1576
+
1577
+
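# Editorial aside: minimal illustration of the min_count rule -- with one of
# two values missing there is a single non-null value, which is below
# min_count=2, so the reduction should produce a missing result.
import numpy as np

mask = np.array([False, True])          # one missing value
non_nulls = mask.size - mask.sum()      # 1
print(non_nulls < 2)                    # True -> null out the result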
1578
+ def _zero_out_fperr(arg):
1579
+ # #18044 reference this behavior to fix rolling skew/kurt issue
1580
+ if isinstance(arg, np.ndarray):
1581
+ return np.where(np.abs(arg) < 1e-14, 0, arg)
1582
+ else:
1583
+ return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
1584
+
1585
+
1586
+ @disallow("M8", "m8")
1587
+ def nancorr(
1588
+ a: np.ndarray,
1589
+ b: np.ndarray,
1590
+ *,
1591
+ method: CorrelationMethod = "pearson",
1592
+ min_periods: int | None = None,
1593
+ ) -> float:
1594
+ """
1595
+ a, b: ndarrays
1596
+ """
1597
+ if len(a) != len(b):
1598
+ raise AssertionError("Operands to nancorr must have same size")
1599
+
1600
+ if min_periods is None:
1601
+ min_periods = 1
1602
+
1603
+ valid = notna(a) & notna(b)
1604
+ if not valid.all():
1605
+ a = a[valid]
1606
+ b = b[valid]
1607
+
1608
+ if len(a) < min_periods:
1609
+ return np.nan
1610
+
1611
+ a = _ensure_numeric(a)
1612
+ b = _ensure_numeric(b)
1613
+
1614
+ f = get_corr_func(method)
1615
+ return f(a, b)
1616
+
1617
+
1618
+ def get_corr_func(
1619
+ method: CorrelationMethod,
1620
+ ) -> Callable[[np.ndarray, np.ndarray], float]:
1621
+ if method == "kendall":
1622
+ from scipy.stats import kendalltau
1623
+
1624
+ def func(a, b):
1625
+ return kendalltau(a, b)[0]
1626
+
1627
+ return func
1628
+ elif method == "spearman":
1629
+ from scipy.stats import spearmanr
1630
+
1631
+ def func(a, b):
1632
+ return spearmanr(a, b)[0]
1633
+
1634
+ return func
1635
+ elif method == "pearson":
1636
+
1637
+ def func(a, b):
1638
+ return np.corrcoef(a, b)[0, 1]
1639
+
1640
+ return func
1641
+ elif callable(method):
1642
+ return method
1643
+
1644
+ raise ValueError(
1645
+ f"Unknown method '{method}', expected one of "
1646
+ "'kendall', 'spearman', 'pearson', or callable"
1647
+ )
1648
+
1649
+
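# Editorial aside: hedged usage sketch for nancorr -- pairwise-NA positions are
# dropped first, then the chosen correlation function (Pearson by default,
# via np.corrcoef) is applied to the remaining pairs.
import numpy as np
from pandas.core import nanops

a = np.array([1.0, 2.0, np.nan, 4.0])
b = np.array([1.0, 2.0, 3.0, 8.0])
print(nanops.nancorr(a, b))  # Pearson correlation over the 3 valid pairs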
1650
+ @disallow("M8", "m8")
1651
+ def nancov(
1652
+ a: np.ndarray,
1653
+ b: np.ndarray,
1654
+ *,
1655
+ min_periods: int | None = None,
1656
+ ddof: int | None = 1,
1657
+ ) -> float:
1658
+ if len(a) != len(b):
1659
+ raise AssertionError("Operands to nancov must have same size")
1660
+
1661
+ if min_periods is None:
1662
+ min_periods = 1
1663
+
1664
+ valid = notna(a) & notna(b)
1665
+ if not valid.all():
1666
+ a = a[valid]
1667
+ b = b[valid]
1668
+
1669
+ if len(a) < min_periods:
1670
+ return np.nan
1671
+
1672
+ a = _ensure_numeric(a)
1673
+ b = _ensure_numeric(b)
1674
+
1675
+ return np.cov(a, b, ddof=ddof)[0, 1]
1676
+
1677
+
1678
+ def _ensure_numeric(x):
1679
+ if isinstance(x, np.ndarray):
1680
+ if x.dtype.kind in "biu":
1681
+ x = x.astype(np.float64)
1682
+ elif x.dtype == object:
1683
+ inferred = lib.infer_dtype(x)
1684
+ if inferred in ["string", "mixed"]:
1685
+ # GH#44008, GH#36703 avoid casting e.g. strings to numeric
1686
+ raise TypeError(f"Could not convert {x} to numeric")
1687
+ try:
1688
+ x = x.astype(np.complex128)
1689
+ except (TypeError, ValueError):
1690
+ try:
1691
+ x = x.astype(np.float64)
1692
+ except ValueError as err:
1693
+ # GH#29941 we get here with object arrays containing strs
1694
+ raise TypeError(f"Could not convert {x} to numeric") from err
1695
+ else:
1696
+ if not np.any(np.imag(x)):
1697
+ x = x.real
1698
+ elif not (is_float(x) or is_integer(x) or is_complex(x)):
1699
+ if isinstance(x, str):
1700
+ # GH#44008, GH#36703 avoid casting e.g. strings to numeric
1701
+ raise TypeError(f"Could not convert string '{x}' to numeric")
1702
+ try:
1703
+ x = float(x)
1704
+ except (TypeError, ValueError):
1705
+ # e.g. "1+1j" or "foo"
1706
+ try:
1707
+ x = complex(x)
1708
+ except ValueError as err:
1709
+ # e.g. "foo"
1710
+ raise TypeError(f"Could not convert {x} to numeric") from err
1711
+ return x
1712
+
1713
+
1714
+ def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
1715
+ """
1716
+ Cumulative function with skipna support.
1717
+
1718
+ Parameters
1719
+ ----------
1720
+ values : np.ndarray or ExtensionArray
1721
+ accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate}
1722
+ skipna : bool
1723
+
1724
+ Returns
1725
+ -------
1726
+ np.ndarray or ExtensionArray
1727
+ """
1728
+ mask_a, mask_b = {
1729
+ np.cumprod: (1.0, np.nan),
1730
+ np.maximum.accumulate: (-np.inf, np.nan),
1731
+ np.cumsum: (0.0, np.nan),
1732
+ np.minimum.accumulate: (np.inf, np.nan),
1733
+ }[accum_func]
1734
+
1735
+ # This should go through ea interface
1736
+ assert values.dtype.kind not in "mM"
1737
+
1738
+ # We will be applying this function to block values
1739
+ if skipna and not issubclass(values.dtype.type, (np.integer, np.bool_)):
1740
+ vals = values.copy()
1741
+ mask = isna(vals)
1742
+ vals[mask] = mask_a
1743
+ result = accum_func(vals, axis=0)
1744
+ result[mask] = mask_b
1745
+ else:
1746
+ result = accum_func(values, axis=0)
1747
+
1748
+ return result
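# Editorial aside: the skipna trick used by na_accum_func, shown here for
# np.cumsum -- NaNs are temporarily replaced by the accumulator's identity
# (0.0 for cumsum), accumulated, then restored as NaN in the output.
import numpy as np

vals = np.array([1.0, np.nan, 2.0])
mask = np.isnan(vals)
tmp = vals.copy()
tmp[mask] = 0.0
out = np.cumsum(tmp)
out[mask] = np.nan
print(out)  # [ 1. nan  3.]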
venv/lib/python3.10/site-packages/pandas/core/resample.py ADDED
@@ -0,0 +1,2920 @@
1
+ from __future__ import annotations
2
+
3
+ import copy
4
+ from textwrap import dedent
5
+ from typing import (
6
+ TYPE_CHECKING,
7
+ Callable,
8
+ Literal,
9
+ cast,
10
+ final,
11
+ no_type_check,
12
+ )
13
+ import warnings
14
+
15
+ import numpy as np
16
+
17
+ from pandas._libs import lib
18
+ from pandas._libs.tslibs import (
19
+ BaseOffset,
20
+ IncompatibleFrequency,
21
+ NaT,
22
+ Period,
23
+ Timedelta,
24
+ Timestamp,
25
+ to_offset,
26
+ )
27
+ from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
28
+ from pandas._typing import NDFrameT
29
+ from pandas.compat.numpy import function as nv
30
+ from pandas.errors import AbstractMethodError
31
+ from pandas.util._decorators import (
32
+ Appender,
33
+ Substitution,
34
+ doc,
35
+ )
36
+ from pandas.util._exceptions import (
37
+ find_stack_level,
38
+ rewrite_warning,
39
+ )
40
+
41
+ from pandas.core.dtypes.dtypes import ArrowDtype
42
+ from pandas.core.dtypes.generic import (
43
+ ABCDataFrame,
44
+ ABCSeries,
45
+ )
46
+
47
+ import pandas.core.algorithms as algos
48
+ from pandas.core.apply import (
49
+ ResamplerWindowApply,
50
+ warn_alias_replacement,
51
+ )
52
+ from pandas.core.arrays import ArrowExtensionArray
53
+ from pandas.core.base import (
54
+ PandasObject,
55
+ SelectionMixin,
56
+ )
57
+ import pandas.core.common as com
58
+ from pandas.core.generic import (
59
+ NDFrame,
60
+ _shared_docs,
61
+ )
62
+ from pandas.core.groupby.generic import SeriesGroupBy
63
+ from pandas.core.groupby.groupby import (
64
+ BaseGroupBy,
65
+ GroupBy,
66
+ _apply_groupings_depr,
67
+ _pipe_template,
68
+ get_groupby,
69
+ )
70
+ from pandas.core.groupby.grouper import Grouper
71
+ from pandas.core.groupby.ops import BinGrouper
72
+ from pandas.core.indexes.api import MultiIndex
73
+ from pandas.core.indexes.base import Index
74
+ from pandas.core.indexes.datetimes import (
75
+ DatetimeIndex,
76
+ date_range,
77
+ )
78
+ from pandas.core.indexes.period import (
79
+ PeriodIndex,
80
+ period_range,
81
+ )
82
+ from pandas.core.indexes.timedeltas import (
83
+ TimedeltaIndex,
84
+ timedelta_range,
85
+ )
86
+
87
+ from pandas.tseries.frequencies import (
88
+ is_subperiod,
89
+ is_superperiod,
90
+ )
91
+ from pandas.tseries.offsets import (
92
+ Day,
93
+ Tick,
94
+ )
95
+
96
+ if TYPE_CHECKING:
97
+ from collections.abc import Hashable
98
+
99
+ from pandas._typing import (
100
+ AnyArrayLike,
101
+ Axis,
102
+ AxisInt,
103
+ Frequency,
104
+ IndexLabel,
105
+ InterpolateOptions,
106
+ T,
107
+ TimedeltaConvertibleTypes,
108
+ TimeGrouperOrigin,
109
+ TimestampConvertibleTypes,
110
+ npt,
111
+ )
112
+
113
+ from pandas import (
114
+ DataFrame,
115
+ Series,
116
+ )
117
+
118
+ _shared_docs_kwargs: dict[str, str] = {}
119
+
120
+
121
+ class Resampler(BaseGroupBy, PandasObject):
122
+ """
123
+ Class for resampling datetimelike data, a groupby-like operation.
124
+ See aggregate, transform, and apply functions on this object.
125
+
126
+ It's easiest to use obj.resample(...) to use Resampler.
127
+
128
+ Parameters
129
+ ----------
130
+ obj : Series or DataFrame
131
+ groupby : TimeGrouper
132
+ axis : int, default 0
133
+ kind : str or None
134
+ 'period', 'timestamp' to override default index treatment
135
+
136
+ Returns
137
+ -------
138
+ a Resampler of the appropriate type
139
+
140
+ Notes
141
+ -----
142
+ After resampling, see aggregate, apply, and transform functions.
143
+ """
144
+
145
+ _grouper: BinGrouper
146
+ _timegrouper: TimeGrouper
147
+ binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass
148
+ exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
149
+ _internal_names_set = set({"obj", "ax", "_indexer"})
150
+
151
+ # to the groupby descriptor
152
+ _attributes = [
153
+ "freq",
154
+ "axis",
155
+ "closed",
156
+ "label",
157
+ "convention",
158
+ "kind",
159
+ "origin",
160
+ "offset",
161
+ ]
162
+
163
+ def __init__(
164
+ self,
165
+ obj: NDFrame,
166
+ timegrouper: TimeGrouper,
167
+ axis: Axis = 0,
168
+ kind=None,
169
+ *,
170
+ gpr_index: Index,
171
+ group_keys: bool = False,
172
+ selection=None,
173
+ include_groups: bool = True,
174
+ ) -> None:
175
+ self._timegrouper = timegrouper
176
+ self.keys = None
177
+ self.sort = True
178
+ self.axis = obj._get_axis_number(axis)
179
+ self.kind = kind
180
+ self.group_keys = group_keys
181
+ self.as_index = True
182
+ self.include_groups = include_groups
183
+
184
+ self.obj, self.ax, self._indexer = self._timegrouper._set_grouper(
185
+ self._convert_obj(obj), sort=True, gpr_index=gpr_index
186
+ )
187
+ self.binner, self._grouper = self._get_binner()
188
+ self._selection = selection
189
+ if self._timegrouper.key is not None:
190
+ self.exclusions = frozenset([self._timegrouper.key])
191
+ else:
192
+ self.exclusions = frozenset()
193
+
194
+ @final
195
+ def __str__(self) -> str:
196
+ """
197
+ Provide a nice str repr of our rolling object.
198
+ """
199
+ attrs = (
200
+ f"{k}={getattr(self._timegrouper, k)}"
201
+ for k in self._attributes
202
+ if getattr(self._timegrouper, k, None) is not None
203
+ )
204
+ return f"{type(self).__name__} [{', '.join(attrs)}]"
205
+
206
+ @final
207
+ def __getattr__(self, attr: str):
208
+ if attr in self._internal_names_set:
209
+ return object.__getattribute__(self, attr)
210
+ if attr in self._attributes:
211
+ return getattr(self._timegrouper, attr)
212
+ if attr in self.obj:
213
+ return self[attr]
214
+
215
+ return object.__getattribute__(self, attr)
216
+
217
+ @final
218
+ @property
219
+ def _from_selection(self) -> bool:
220
+ """
221
+ Is the resampling from a DataFrame column or MultiIndex level.
222
+ """
223
+ # upsampling and PeriodIndex resampling do not work
224
+ # with selection, this state used to catch and raise an error
225
+ return self._timegrouper is not None and (
226
+ self._timegrouper.key is not None or self._timegrouper.level is not None
227
+ )
228
+
229
+ def _convert_obj(self, obj: NDFrameT) -> NDFrameT:
230
+ """
231
+ Provide any conversions for the object in order to handle it correctly.
232
+
233
+ Parameters
234
+ ----------
235
+ obj : Series or DataFrame
236
+
237
+ Returns
238
+ -------
239
+ Series or DataFrame
240
+ """
241
+ return obj._consolidate()
242
+
243
+ def _get_binner_for_time(self):
244
+ raise AbstractMethodError(self)
245
+
246
+ @final
247
+ def _get_binner(self):
248
+ """
249
+ Create the BinGrouper, assume that self.set_grouper(obj)
250
+ has already been called.
251
+ """
252
+ binner, bins, binlabels = self._get_binner_for_time()
253
+ assert len(bins) == len(binlabels)
254
+ bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer)
255
+ return binner, bin_grouper
256
+
257
+ @final
258
+ @Substitution(
259
+ klass="Resampler",
260
+ examples="""
261
+ >>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
262
+ ... index=pd.date_range('2012-08-02', periods=4))
263
+ >>> df
264
+ A
265
+ 2012-08-02 1
266
+ 2012-08-03 2
267
+ 2012-08-04 3
268
+ 2012-08-05 4
269
+
270
+ To get the difference between each 2-day period's maximum and minimum
271
+ value in one pass, you can do
272
+
273
+ >>> df.resample('2D').pipe(lambda x: x.max() - x.min())
274
+ A
275
+ 2012-08-02 1
276
+ 2012-08-04 1""",
277
+ )
278
+ @Appender(_pipe_template)
279
+ def pipe(
280
+ self,
281
+ func: Callable[..., T] | tuple[Callable[..., T], str],
282
+ *args,
283
+ **kwargs,
284
+ ) -> T:
285
+ return super().pipe(func, *args, **kwargs)
286
+
287
+ _agg_see_also_doc = dedent(
288
+ """
289
+ See Also
290
+ --------
291
+ DataFrame.groupby.aggregate : Aggregate using callable, string, dict,
292
+ or list of string/callables.
293
+ DataFrame.resample.transform : Transforms the Series on each group
294
+ based on the given function.
295
+ DataFrame.aggregate: Aggregate using one or more
296
+ operations over the specified axis.
297
+ """
298
+ )
299
+
300
+ _agg_examples_doc = dedent(
301
+ """
302
+ Examples
303
+ --------
304
+ >>> s = pd.Series([1, 2, 3, 4, 5],
305
+ ... index=pd.date_range('20130101', periods=5, freq='s'))
306
+ >>> s
307
+ 2013-01-01 00:00:00 1
308
+ 2013-01-01 00:00:01 2
309
+ 2013-01-01 00:00:02 3
310
+ 2013-01-01 00:00:03 4
311
+ 2013-01-01 00:00:04 5
312
+ Freq: s, dtype: int64
313
+
314
+ >>> r = s.resample('2s')
315
+
316
+ >>> r.agg("sum")
317
+ 2013-01-01 00:00:00 3
318
+ 2013-01-01 00:00:02 7
319
+ 2013-01-01 00:00:04 5
320
+ Freq: 2s, dtype: int64
321
+
322
+ >>> r.agg(['sum', 'mean', 'max'])
323
+ sum mean max
324
+ 2013-01-01 00:00:00 3 1.5 2
325
+ 2013-01-01 00:00:02 7 3.5 4
326
+ 2013-01-01 00:00:04 5 5.0 5
327
+
328
+ >>> r.agg({'result': lambda x: x.mean() / x.std(),
329
+ ... 'total': "sum"})
330
+ result total
331
+ 2013-01-01 00:00:00 2.121320 3
332
+ 2013-01-01 00:00:02 4.949747 7
333
+ 2013-01-01 00:00:04 NaN 5
334
+
335
+ >>> r.agg(average="mean", total="sum")
336
+ average total
337
+ 2013-01-01 00:00:00 1.5 3
338
+ 2013-01-01 00:00:02 3.5 7
339
+ 2013-01-01 00:00:04 5.0 5
340
+ """
341
+ )
342
+
343
+ @final
344
+ @doc(
345
+ _shared_docs["aggregate"],
346
+ see_also=_agg_see_also_doc,
347
+ examples=_agg_examples_doc,
348
+ klass="DataFrame",
349
+ axis="",
350
+ )
351
+ def aggregate(self, func=None, *args, **kwargs):
352
+ result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
353
+ if result is None:
354
+ how = func
355
+ result = self._groupby_and_aggregate(how, *args, **kwargs)
356
+
357
+ return result
358
+
359
+ agg = aggregate
360
+ apply = aggregate
361
+
362
+ @final
363
+ def transform(self, arg, *args, **kwargs):
364
+ """
365
+ Call function producing a like-indexed Series on each group.
366
+
367
+ Return a Series with the transformed values.
368
+
369
+ Parameters
370
+ ----------
371
+ arg : function
372
+ To apply to each group. Should return a Series with the same index.
373
+
374
+ Returns
375
+ -------
376
+ Series
377
+
378
+ Examples
379
+ --------
380
+ >>> s = pd.Series([1, 2],
381
+ ... index=pd.date_range('20180101',
382
+ ... periods=2,
383
+ ... freq='1h'))
384
+ >>> s
385
+ 2018-01-01 00:00:00 1
386
+ 2018-01-01 01:00:00 2
387
+ Freq: h, dtype: int64
388
+
389
+ >>> resampled = s.resample('15min')
390
+ >>> resampled.transform(lambda x: (x - x.mean()) / x.std())
391
+ 2018-01-01 00:00:00 NaN
392
+ 2018-01-01 01:00:00 NaN
393
+ Freq: h, dtype: float64
394
+ """
395
+ return self._selected_obj.groupby(self._timegrouper).transform(
396
+ arg, *args, **kwargs
397
+ )
398
+
399
+ def _downsample(self, f, **kwargs):
400
+ raise AbstractMethodError(self)
401
+
402
+ def _upsample(self, f, limit: int | None = None, fill_value=None):
403
+ raise AbstractMethodError(self)
404
+
405
+ def _gotitem(self, key, ndim: int, subset=None):
406
+ """
407
+ Sub-classes to define. Return a sliced object.
408
+
409
+ Parameters
410
+ ----------
411
+ key : string / list of selections
412
+ ndim : {1, 2}
413
+ requested ndim of result
414
+ subset : object, default None
415
+ subset to act on
416
+ """
417
+ grouper = self._grouper
418
+ if subset is None:
419
+ subset = self.obj
420
+ if key is not None:
421
+ subset = subset[key]
422
+ else:
423
+ # reached via Apply.agg_dict_like with selection=None and ndim=1
424
+ assert subset.ndim == 1
425
+ if ndim == 1:
426
+ assert subset.ndim == 1
427
+
428
+ grouped = get_groupby(
429
+ subset, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
430
+ )
431
+ return grouped
432
+
433
+ def _groupby_and_aggregate(self, how, *args, **kwargs):
434
+ """
435
+ Re-evaluate the obj with a groupby aggregation.
436
+ """
437
+ grouper = self._grouper
438
+
439
+ # Excludes `on` column when provided
440
+ obj = self._obj_with_exclusions
441
+
442
+ grouped = get_groupby(
443
+ obj, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
444
+ )
445
+
446
+ try:
447
+ if callable(how):
448
+ # TODO: test_resample_apply_with_additional_args fails if we go
449
+ # through the non-lambda path, not clear that it should.
450
+ func = lambda x: how(x, *args, **kwargs)
451
+ result = grouped.aggregate(func)
452
+ else:
453
+ result = grouped.aggregate(how, *args, **kwargs)
454
+ except (AttributeError, KeyError):
455
+ # we have a non-reducing function; try to evaluate
456
+ # alternatively we want to evaluate only a column of the input
457
+
458
+ # test_apply_to_one_column_of_df the function being applied references
459
+ # a DataFrame column, but aggregate_item_by_item operates column-wise
460
+ # on Series, raising AttributeError or KeyError
461
+ # (depending on whether the column lookup uses getattr/__getitem__)
462
+ result = _apply(
463
+ grouped, how, *args, include_groups=self.include_groups, **kwargs
464
+ )
465
+
466
+ except ValueError as err:
467
+ if "Must produce aggregated value" in str(err):
468
+ # raised in _aggregate_named
469
+ # see test_apply_without_aggregation, test_apply_with_mutated_index
470
+ pass
471
+ else:
472
+ raise
473
+
474
+ # we have a non-reducing function
475
+ # try to evaluate
476
+ result = _apply(
477
+ grouped, how, *args, include_groups=self.include_groups, **kwargs
478
+ )
479
+
480
+ return self._wrap_result(result)
481
+
482
+ @final
483
+ def _get_resampler_for_grouping(
484
+ self, groupby: GroupBy, key, include_groups: bool = True
485
+ ):
486
+ """
487
+ Return the correct class for resampling with groupby.
488
+ """
489
+ return self._resampler_for_grouping(
490
+ groupby=groupby, key=key, parent=self, include_groups=include_groups
491
+ )
492
+
493
+ def _wrap_result(self, result):
494
+ """
495
+ Potentially wrap any results.
496
+ """
497
+ # GH 47705
498
+ obj = self.obj
499
+ if (
500
+ isinstance(result, ABCDataFrame)
501
+ and len(result) == 0
502
+ and not isinstance(result.index, PeriodIndex)
503
+ ):
504
+ result = result.set_index(
505
+ _asfreq_compat(obj.index[:0], freq=self.freq), append=True
506
+ )
507
+
508
+ if isinstance(result, ABCSeries) and self._selection is not None:
509
+ result.name = self._selection
510
+
511
+ if isinstance(result, ABCSeries) and result.empty:
512
+ # When index is all NaT, result is empty but index is not
513
+ result.index = _asfreq_compat(obj.index[:0], freq=self.freq)
514
+ result.name = getattr(obj, "name", None)
515
+
516
+ if self._timegrouper._arrow_dtype is not None:
517
+ result.index = result.index.astype(self._timegrouper._arrow_dtype)
518
+
519
+ return result
520
+
521
+ @final
522
+ def ffill(self, limit: int | None = None):
523
+ """
524
+ Forward fill the values.
525
+
526
+ Parameters
527
+ ----------
528
+ limit : int, optional
529
+ Limit of how many values to fill.
530
+
531
+ Returns
532
+ -------
533
+ An upsampled Series.
534
+
535
+ See Also
536
+ --------
537
+ Series.fillna: Fill NA/NaN values using the specified method.
538
+ DataFrame.fillna: Fill NA/NaN values using the specified method.
539
+
540
+ Examples
541
+ --------
542
+ Here we only create a ``Series``.
543
+
544
+ >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
545
+ ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
546
+ >>> ser
547
+ 2023-01-01 1
548
+ 2023-01-15 2
549
+ 2023-02-01 3
550
+ 2023-02-15 4
551
+ dtype: int64
552
+
553
+ Example for ``ffill`` with downsampling (we have fewer dates after resampling):
554
+
555
+ >>> ser.resample('MS').ffill()
556
+ 2023-01-01 1
557
+ 2023-02-01 3
558
+ Freq: MS, dtype: int64
559
+
560
+ Example for ``ffill`` with upsampling (fill the new dates with
561
+ the previous value):
562
+
563
+ >>> ser.resample('W').ffill()
564
+ 2023-01-01 1
565
+ 2023-01-08 1
566
+ 2023-01-15 2
567
+ 2023-01-22 2
568
+ 2023-01-29 2
569
+ 2023-02-05 3
570
+ 2023-02-12 3
571
+ 2023-02-19 4
572
+ Freq: W-SUN, dtype: int64
573
+
574
+ With upsampling and limiting (only fill the first new date with the
575
+ previous value):
576
+
577
+ >>> ser.resample('W').ffill(limit=1)
578
+ 2023-01-01 1.0
579
+ 2023-01-08 1.0
580
+ 2023-01-15 2.0
581
+ 2023-01-22 2.0
582
+ 2023-01-29 NaN
583
+ 2023-02-05 3.0
584
+ 2023-02-12 NaN
585
+ 2023-02-19 4.0
586
+ Freq: W-SUN, dtype: float64
587
+ """
588
+ return self._upsample("ffill", limit=limit)
589
+
590
+ @final
591
+ def nearest(self, limit: int | None = None):
592
+ """
593
+ Resample by using the nearest value.
594
+
595
+ When resampling data, missing values may appear (e.g., when the
596
+ resampling frequency is higher than the original frequency).
597
+ The `nearest` method will replace ``NaN`` values that appeared in
598
+ the resampled data with the value from the nearest member of the
599
+ sequence, based on the index value.
600
+ Missing values that existed in the original data will not be modified.
601
+ If `limit` is given, fill only this many values in each direction for
602
+ each of the original values.
603
+
604
+ Parameters
605
+ ----------
606
+ limit : int, optional
607
+ Limit of how many values to fill.
608
+
609
+ Returns
610
+ -------
611
+ Series or DataFrame
612
+ An upsampled Series or DataFrame with ``NaN`` values filled with
613
+ their nearest value.
614
+
615
+ See Also
616
+ --------
617
+ backfill : Backward fill the new missing values in the resampled data.
618
+ pad : Forward fill ``NaN`` values.
619
+
620
+ Examples
621
+ --------
622
+ >>> s = pd.Series([1, 2],
623
+ ... index=pd.date_range('20180101',
624
+ ... periods=2,
625
+ ... freq='1h'))
626
+ >>> s
627
+ 2018-01-01 00:00:00 1
628
+ 2018-01-01 01:00:00 2
629
+ Freq: h, dtype: int64
630
+
631
+ >>> s.resample('15min').nearest()
632
+ 2018-01-01 00:00:00 1
633
+ 2018-01-01 00:15:00 1
634
+ 2018-01-01 00:30:00 2
635
+ 2018-01-01 00:45:00 2
636
+ 2018-01-01 01:00:00 2
637
+ Freq: 15min, dtype: int64
638
+
639
+ Limit the number of upsampled values imputed by the nearest:
640
+
641
+ >>> s.resample('15min').nearest(limit=1)
642
+ 2018-01-01 00:00:00 1.0
643
+ 2018-01-01 00:15:00 1.0
644
+ 2018-01-01 00:30:00 NaN
645
+ 2018-01-01 00:45:00 2.0
646
+ 2018-01-01 01:00:00 2.0
647
+ Freq: 15min, dtype: float64
648
+ """
649
+ return self._upsample("nearest", limit=limit)
650
+
651
+ @final
652
+ def bfill(self, limit: int | None = None):
653
+ """
654
+ Backward fill the new missing values in the resampled data.
655
+
656
+ In statistics, imputation is the process of replacing missing data with
657
+ substituted values [1]_. When resampling data, missing values may
658
+ appear (e.g., when the resampling frequency is higher than the original
659
+ frequency). The backward fill will replace NaN values that appeared in
660
+ the resampled data with the next value in the original sequence.
661
+ Missing values that existed in the original data will not be modified.
662
+
663
+ Parameters
664
+ ----------
665
+ limit : int, optional
666
+ Limit of how many values to fill.
667
+
668
+ Returns
669
+ -------
670
+ Series, DataFrame
671
+ An upsampled Series or DataFrame with backward filled NaN values.
672
+
673
+ See Also
674
+ --------
675
+ bfill : Alias of backfill.
676
+ fillna : Fill NaN values using the specified method, which can be
677
+ 'backfill'.
678
+ nearest : Fill NaN values with nearest neighbor starting from center.
679
+ ffill : Forward fill NaN values.
680
+ Series.fillna : Fill NaN values in the Series using the
681
+ specified method, which can be 'backfill'.
682
+ DataFrame.fillna : Fill NaN values in the DataFrame using the
683
+ specified method, which can be 'backfill'.
684
+
685
+ References
686
+ ----------
687
+ .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
688
+
689
+ Examples
690
+ --------
691
+ Resampling a Series:
692
+
693
+ >>> s = pd.Series([1, 2, 3],
694
+ ... index=pd.date_range('20180101', periods=3, freq='h'))
695
+ >>> s
696
+ 2018-01-01 00:00:00 1
697
+ 2018-01-01 01:00:00 2
698
+ 2018-01-01 02:00:00 3
699
+ Freq: h, dtype: int64
700
+
701
+ >>> s.resample('30min').bfill()
702
+ 2018-01-01 00:00:00 1
703
+ 2018-01-01 00:30:00 2
704
+ 2018-01-01 01:00:00 2
705
+ 2018-01-01 01:30:00 3
706
+ 2018-01-01 02:00:00 3
707
+ Freq: 30min, dtype: int64
708
+
709
+ >>> s.resample('15min').bfill(limit=2)
710
+ 2018-01-01 00:00:00 1.0
711
+ 2018-01-01 00:15:00 NaN
712
+ 2018-01-01 00:30:00 2.0
713
+ 2018-01-01 00:45:00 2.0
714
+ 2018-01-01 01:00:00 2.0
715
+ 2018-01-01 01:15:00 NaN
716
+ 2018-01-01 01:30:00 3.0
717
+ 2018-01-01 01:45:00 3.0
718
+ 2018-01-01 02:00:00 3.0
719
+ Freq: 15min, dtype: float64
720
+
721
+ Resampling a DataFrame that has missing values:
722
+
723
+ >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
724
+ ... index=pd.date_range('20180101', periods=3,
725
+ ... freq='h'))
726
+ >>> df
727
+ a b
728
+ 2018-01-01 00:00:00 2.0 1
729
+ 2018-01-01 01:00:00 NaN 3
730
+ 2018-01-01 02:00:00 6.0 5
731
+
732
+ >>> df.resample('30min').bfill()
733
+ a b
734
+ 2018-01-01 00:00:00 2.0 1
735
+ 2018-01-01 00:30:00 NaN 3
736
+ 2018-01-01 01:00:00 NaN 3
737
+ 2018-01-01 01:30:00 6.0 5
738
+ 2018-01-01 02:00:00 6.0 5
739
+
740
+ >>> df.resample('15min').bfill(limit=2)
741
+ a b
742
+ 2018-01-01 00:00:00 2.0 1.0
743
+ 2018-01-01 00:15:00 NaN NaN
744
+ 2018-01-01 00:30:00 NaN 3.0
745
+ 2018-01-01 00:45:00 NaN 3.0
746
+ 2018-01-01 01:00:00 NaN 3.0
747
+ 2018-01-01 01:15:00 NaN NaN
748
+ 2018-01-01 01:30:00 6.0 5.0
749
+ 2018-01-01 01:45:00 6.0 5.0
750
+ 2018-01-01 02:00:00 6.0 5.0
751
+ """
752
+ return self._upsample("bfill", limit=limit)
753
+
754
+ @final
755
+ def fillna(self, method, limit: int | None = None):
756
+ """
757
+ Fill missing values introduced by upsampling.
758
+
759
+ In statistics, imputation is the process of replacing missing data with
760
+ substituted values [1]_. When resampling data, missing values may
761
+ appear (e.g., when the resampling frequency is higher than the original
762
+ frequency).
763
+
764
+ Missing values that existed in the original data will
765
+ not be modified.
766
+
767
+ Parameters
768
+ ----------
769
+ method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}
770
+ Method to use for filling holes in resampled data
771
+
772
+ * 'pad' or 'ffill': use previous valid observation to fill gap
773
+ (forward fill).
774
+ * 'backfill' or 'bfill': use next valid observation to fill gap.
775
+ * 'nearest': use nearest valid observation to fill gap.
776
+
777
+ limit : int, optional
778
+ Limit of how many consecutive missing values to fill.
779
+
780
+ Returns
781
+ -------
782
+ Series or DataFrame
783
+ An upsampled Series or DataFrame with missing values filled.
784
+
785
+ See Also
786
+ --------
787
+ bfill : Backward fill NaN values in the resampled data.
788
+ ffill : Forward fill NaN values in the resampled data.
789
+ nearest : Fill NaN values in the resampled data
790
+ with nearest neighbor starting from center.
791
+ interpolate : Fill NaN values using interpolation.
792
+ Series.fillna : Fill NaN values in the Series using the
793
+ specified method, which can be 'bfill' and 'ffill'.
794
+ DataFrame.fillna : Fill NaN values in the DataFrame using the
795
+ specified method, which can be 'bfill' and 'ffill'.
796
+
797
+ References
798
+ ----------
799
+ .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
800
+
801
+ Examples
802
+ --------
803
+ Resampling a Series:
804
+
805
+ >>> s = pd.Series([1, 2, 3],
806
+ ... index=pd.date_range('20180101', periods=3, freq='h'))
807
+ >>> s
808
+ 2018-01-01 00:00:00 1
809
+ 2018-01-01 01:00:00 2
810
+ 2018-01-01 02:00:00 3
811
+ Freq: h, dtype: int64
812
+
813
+ Without filling the missing values you get:
814
+
815
+ >>> s.resample("30min").asfreq()
816
+ 2018-01-01 00:00:00 1.0
817
+ 2018-01-01 00:30:00 NaN
818
+ 2018-01-01 01:00:00 2.0
819
+ 2018-01-01 01:30:00 NaN
820
+ 2018-01-01 02:00:00 3.0
821
+ Freq: 30min, dtype: float64
822
+
823
+ >>> s.resample('30min').fillna("backfill")
824
+ 2018-01-01 00:00:00 1
825
+ 2018-01-01 00:30:00 2
826
+ 2018-01-01 01:00:00 2
827
+ 2018-01-01 01:30:00 3
828
+ 2018-01-01 02:00:00 3
829
+ Freq: 30min, dtype: int64
830
+
831
+ >>> s.resample('15min').fillna("backfill", limit=2)
832
+ 2018-01-01 00:00:00 1.0
833
+ 2018-01-01 00:15:00 NaN
834
+ 2018-01-01 00:30:00 2.0
835
+ 2018-01-01 00:45:00 2.0
836
+ 2018-01-01 01:00:00 2.0
837
+ 2018-01-01 01:15:00 NaN
838
+ 2018-01-01 01:30:00 3.0
839
+ 2018-01-01 01:45:00 3.0
840
+ 2018-01-01 02:00:00 3.0
841
+ Freq: 15min, dtype: float64
842
+
843
+ >>> s.resample('30min').fillna("pad")
844
+ 2018-01-01 00:00:00 1
845
+ 2018-01-01 00:30:00 1
846
+ 2018-01-01 01:00:00 2
847
+ 2018-01-01 01:30:00 2
848
+ 2018-01-01 02:00:00 3
849
+ Freq: 30min, dtype: int64
850
+
851
+ >>> s.resample('30min').fillna("nearest")
852
+ 2018-01-01 00:00:00 1
853
+ 2018-01-01 00:30:00 2
854
+ 2018-01-01 01:00:00 2
855
+ 2018-01-01 01:30:00 3
856
+ 2018-01-01 02:00:00 3
857
+ Freq: 30min, dtype: int64
858
+
859
+ Missing values present before the upsampling are not affected.
860
+
861
+ >>> sm = pd.Series([1, None, 3],
862
+ ... index=pd.date_range('20180101', periods=3, freq='h'))
863
+ >>> sm
864
+ 2018-01-01 00:00:00 1.0
865
+ 2018-01-01 01:00:00 NaN
866
+ 2018-01-01 02:00:00 3.0
867
+ Freq: h, dtype: float64
868
+
869
+ >>> sm.resample('30min').fillna('backfill')
870
+ 2018-01-01 00:00:00 1.0
871
+ 2018-01-01 00:30:00 NaN
872
+ 2018-01-01 01:00:00 NaN
873
+ 2018-01-01 01:30:00 3.0
874
+ 2018-01-01 02:00:00 3.0
875
+ Freq: 30min, dtype: float64
876
+
877
+ >>> sm.resample('30min').fillna('pad')
878
+ 2018-01-01 00:00:00 1.0
879
+ 2018-01-01 00:30:00 1.0
880
+ 2018-01-01 01:00:00 NaN
881
+ 2018-01-01 01:30:00 NaN
882
+ 2018-01-01 02:00:00 3.0
883
+ Freq: 30min, dtype: float64
884
+
885
+ >>> sm.resample('30min').fillna('nearest')
886
+ 2018-01-01 00:00:00 1.0
887
+ 2018-01-01 00:30:00 NaN
888
+ 2018-01-01 01:00:00 NaN
889
+ 2018-01-01 01:30:00 3.0
890
+ 2018-01-01 02:00:00 3.0
891
+ Freq: 30min, dtype: float64
892
+
893
+ DataFrame resampling is done column-wise. All the same options are
894
+ available.
895
+
896
+ >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
897
+ ... index=pd.date_range('20180101', periods=3,
898
+ ... freq='h'))
899
+ >>> df
900
+ a b
901
+ 2018-01-01 00:00:00 2.0 1
902
+ 2018-01-01 01:00:00 NaN 3
903
+ 2018-01-01 02:00:00 6.0 5
904
+
905
+ >>> df.resample('30min').fillna("bfill")
906
+ a b
907
+ 2018-01-01 00:00:00 2.0 1
908
+ 2018-01-01 00:30:00 NaN 3
909
+ 2018-01-01 01:00:00 NaN 3
910
+ 2018-01-01 01:30:00 6.0 5
911
+ 2018-01-01 02:00:00 6.0 5
912
+ """
913
+ warnings.warn(
914
+ f"{type(self).__name__}.fillna is deprecated and will be removed "
915
+ "in a future version. Use obj.ffill(), obj.bfill(), "
916
+ "or obj.nearest() instead.",
917
+ FutureWarning,
918
+ stacklevel=find_stack_level(),
919
+ )
920
+ return self._upsample(method, limit=limit)
921
+
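# Editorial aside: since Resampler.fillna is deprecated (see the warning above),
# the equivalent spellings go through the dedicated fill methods instead, e.g.:
#   s.resample("30min").fillna("backfill")  ->  s.resample("30min").bfill()
#   s.resample("30min").fillna("pad")       ->  s.resample("30min").ffill()
#   s.resample("30min").fillna("nearest")   ->  s.resample("30min").nearest()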
922
+ @final
923
+ def interpolate(
924
+ self,
925
+ method: InterpolateOptions = "linear",
926
+ *,
927
+ axis: Axis = 0,
928
+ limit: int | None = None,
929
+ inplace: bool = False,
930
+ limit_direction: Literal["forward", "backward", "both"] = "forward",
931
+ limit_area=None,
932
+ downcast=lib.no_default,
933
+ **kwargs,
934
+ ):
935
+ """
936
+ Interpolate values between target timestamps according to different methods.
937
+
938
+ The original index is first reindexed to target timestamps
939
+ (see :meth:`core.resample.Resampler.asfreq`),
940
+ then the interpolation of ``NaN`` values via :meth:`DataFrame.interpolate`
941
+ happens.
942
+
943
+ Parameters
944
+ ----------
945
+ method : str, default 'linear'
946
+ Interpolation technique to use. One of:
947
+
948
+ * 'linear': Ignore the index and treat the values as equally
949
+ spaced. This is the only method supported on MultiIndexes.
950
+ * 'time': Works on daily and higher resolution data to interpolate
951
+ given length of interval.
952
+ * 'index', 'values': use the actual numerical values of the index.
953
+ * 'pad': Fill in NaNs using existing values.
954
+ * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
955
+ 'barycentric', 'polynomial': Passed to
956
+ `scipy.interpolate.interp1d`, whereas 'spline' is passed to
957
+ `scipy.interpolate.UnivariateSpline`. These methods use the numerical
958
+ values of the index. Both 'polynomial' and 'spline' require that
959
+ you also specify an `order` (int), e.g.
960
+ ``df.interpolate(method='polynomial', order=5)``. Note that,
961
+ `slinear` method in Pandas refers to the Scipy first order `spline`
962
+ instead of Pandas first order `spline`.
963
+ * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
964
+ 'cubicspline': Wrappers around the SciPy interpolation methods of
965
+ similar names. See `Notes`.
966
+ * 'from_derivatives': Refers to
967
+ `scipy.interpolate.BPoly.from_derivatives`.
968
+
969
+ axis : {{0 or 'index', 1 or 'columns', None}}, default None
970
+ Axis to interpolate along. For `Series` this parameter is unused
971
+ and defaults to 0.
972
+ limit : int, optional
973
+ Maximum number of consecutive NaNs to fill. Must be greater than
974
+ 0.
975
+ inplace : bool, default False
976
+ Update the data in place if possible.
977
+ limit_direction : {{'forward', 'backward', 'both'}}, Optional
978
+ Consecutive NaNs will be filled in this direction.
979
+
980
+ If limit is specified:
981
+ * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.
982
+ * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
983
+ 'backwards'.
984
+
985
+ If 'limit' is not specified:
986
+ * If 'method' is 'backfill' or 'bfill', the default is 'backward'
987
+ * else the default is 'forward'
988
+
989
+ raises ValueError if `limit_direction` is 'forward' or 'both' and
990
+ method is 'backfill' or 'bfill'.
991
+ raises ValueError if `limit_direction` is 'backward' or 'both' and
992
+ method is 'pad' or 'ffill'.
993
+
994
+ limit_area : {{`None`, 'inside', 'outside'}}, default None
995
+ If limit is specified, consecutive NaNs will be filled with this
996
+ restriction.
997
+
998
+ * ``None``: No fill restriction.
999
+ * 'inside': Only fill NaNs surrounded by valid values
1000
+ (interpolate).
1001
+ * 'outside': Only fill NaNs outside valid values (extrapolate).
1002
+
1003
+ downcast : optional, 'infer' or None, defaults to None
1004
+ Downcast dtypes if possible.
1005
+
1006
+ .. deprecated:: 2.1.0
1007
+
1008
+ ``**kwargs`` : optional
1009
+ Keyword arguments to pass on to the interpolating function.
1010
+
1011
+ Returns
1012
+ -------
1013
+ DataFrame or Series
1014
+ Interpolated values at the specified freq.
1015
+
1016
+ See Also
1017
+ --------
1018
+ core.resample.Resampler.asfreq: Return the values at the new freq,
1019
+ essentially a reindex.
1020
+ DataFrame.interpolate: Fill NaN values using an interpolation method.
1021
+
1022
+ Notes
1023
+ -----
1024
+ For high-frequent or non-equidistant time-series with timestamps
1025
+ the reindexing followed by interpolation may lead to information loss
1026
+ as shown in the last example.
1027
+
1028
+ Examples
1029
+ --------
1030
+
1031
+ >>> start = "2023-03-01T07:00:00"
1032
+ >>> timesteps = pd.date_range(start, periods=5, freq="s")
1033
+ >>> series = pd.Series(data=[1, -1, 2, 1, 3], index=timesteps)
1034
+ >>> series
1035
+ 2023-03-01 07:00:00 1
1036
+ 2023-03-01 07:00:01 -1
1037
+ 2023-03-01 07:00:02 2
1038
+ 2023-03-01 07:00:03 1
1039
+ 2023-03-01 07:00:04 3
1040
+ Freq: s, dtype: int64
1041
+
1042
+ Downsample the series to 0.5Hz by providing the period time of 2s.
1043
+
1044
+ >>> series.resample("2s").interpolate("linear")
1045
+ 2023-03-01 07:00:00 1
1046
+ 2023-03-01 07:00:02 2
1047
+ 2023-03-01 07:00:04 3
1048
+ Freq: 2s, dtype: int64
1049
+
1050
+ Upsample the series to 2Hz by providing the period time of 500ms.
1051
+
1052
+ >>> series.resample("500ms").interpolate("linear")
1053
+ 2023-03-01 07:00:00.000 1.0
1054
+ 2023-03-01 07:00:00.500 0.0
1055
+ 2023-03-01 07:00:01.000 -1.0
1056
+ 2023-03-01 07:00:01.500 0.5
1057
+ 2023-03-01 07:00:02.000 2.0
1058
+ 2023-03-01 07:00:02.500 1.5
1059
+ 2023-03-01 07:00:03.000 1.0
1060
+ 2023-03-01 07:00:03.500 2.0
1061
+ 2023-03-01 07:00:04.000 3.0
1062
+ Freq: 500ms, dtype: float64
1063
+
1064
+ Internal reindexing with ``asfreq()`` prior to interpolation leads to
1065
+ an interpolated timeseries on the basis the reindexed timestamps (anchors).
1066
+ Since not all datapoints from original series become anchors,
1067
+ it can lead to misleading interpolation results as in the following example:
1068
+
1069
+ >>> series.resample("400ms").interpolate("linear")
1070
+ 2023-03-01 07:00:00.000 1.0
1071
+ 2023-03-01 07:00:00.400 1.2
1072
+ 2023-03-01 07:00:00.800 1.4
1073
+ 2023-03-01 07:00:01.200 1.6
1074
+ 2023-03-01 07:00:01.600 1.8
1075
+ 2023-03-01 07:00:02.000 2.0
1076
+ 2023-03-01 07:00:02.400 2.2
1077
+ 2023-03-01 07:00:02.800 2.4
1078
+ 2023-03-01 07:00:03.200 2.6
1079
+ 2023-03-01 07:00:03.600 2.8
1080
+ 2023-03-01 07:00:04.000 3.0
1081
+ Freq: 400ms, dtype: float64
1082
+
1083
+ Note that the series erroneously increases between two anchors
1084
+ ``07:00:00`` and ``07:00:02``.
1085
+ """
1086
+ assert downcast is lib.no_default # just checking coverage
1087
+ result = self._upsample("asfreq")
1088
+ return result.interpolate(
1089
+ method=method,
1090
+ axis=axis,
1091
+ limit=limit,
1092
+ inplace=inplace,
1093
+ limit_direction=limit_direction,
1094
+ limit_area=limit_area,
1095
+ downcast=downcast,
1096
+ **kwargs,
1097
+ )
1098
+
1099
+ @final
1100
+ def asfreq(self, fill_value=None):
1101
+ """
1102
+ Return the values at the new freq, essentially a reindex.
1103
+
1104
+ Parameters
1105
+ ----------
1106
+ fill_value : scalar, optional
1107
+ Value to use for missing values, applied during upsampling (note
1108
+ this does not fill NaNs that already were present).
1109
+
1110
+ Returns
1111
+ -------
1112
+ DataFrame or Series
1113
+ Values at the specified freq.
1114
+
1115
+ See Also
1116
+ --------
1117
+ Series.asfreq: Convert TimeSeries to specified frequency.
1118
+ DataFrame.asfreq: Convert TimeSeries to specified frequency.
1119
+
1120
+ Examples
1121
+ --------
1122
+
1123
+ >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
1124
+ ... ['2023-01-01', '2023-01-31', '2023-02-01', '2023-02-28']))
1125
+ >>> ser
1126
+ 2023-01-01 1
1127
+ 2023-01-31 2
1128
+ 2023-02-01 3
1129
+ 2023-02-28 4
1130
+ dtype: int64
1131
+ >>> ser.resample('MS').asfreq()
1132
+ 2023-01-01 1
1133
+ 2023-02-01 3
1134
+ Freq: MS, dtype: int64
1135
+ """
1136
+ return self._upsample("asfreq", fill_value=fill_value)
1137
+
1138
+ @final
1139
+ def sum(
1140
+ self,
1141
+ numeric_only: bool = False,
1142
+ min_count: int = 0,
1143
+ *args,
1144
+ **kwargs,
1145
+ ):
1146
+ """
1147
+ Compute sum of group values.
1148
+
1149
+ Parameters
1150
+ ----------
1151
+ numeric_only : bool, default False
1152
+ Include only float, int, boolean columns.
1153
+
1154
+ .. versionchanged:: 2.0.0
1155
+
1156
+ numeric_only no longer accepts ``None``.
1157
+
1158
+ min_count : int, default 0
1159
+ The required number of valid values to perform the operation. If fewer
1160
+ than ``min_count`` non-NA values are present the result will be NA.
1161
+
1162
+ Returns
1163
+ -------
1164
+ Series or DataFrame
1165
+ Computed sum of values within each group.
1166
+
1167
+ Examples
1168
+ --------
1169
+ >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
1170
+ ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
1171
+ >>> ser
1172
+ 2023-01-01 1
1173
+ 2023-01-15 2
1174
+ 2023-02-01 3
1175
+ 2023-02-15 4
1176
+ dtype: int64
1177
+ >>> ser.resample('MS').sum()
1178
+ 2023-01-01 3
1179
+ 2023-02-01 7
1180
+ Freq: MS, dtype: int64
1181
+ """
1182
+ maybe_warn_args_and_kwargs(type(self), "sum", args, kwargs)
1183
+ nv.validate_resampler_func("sum", args, kwargs)
1184
+ return self._downsample("sum", numeric_only=numeric_only, min_count=min_count)
1185
+
1186
+ @final
1187
+ def prod(
1188
+ self,
1189
+ numeric_only: bool = False,
1190
+ min_count: int = 0,
1191
+ *args,
1192
+ **kwargs,
1193
+ ):
1194
+ """
1195
+ Compute prod of group values.
1196
+
1197
+ Parameters
1198
+ ----------
1199
+ numeric_only : bool, default False
1200
+ Include only float, int, boolean columns.
1201
+
1202
+ .. versionchanged:: 2.0.0
1203
+
1204
+ numeric_only no longer accepts ``None``.
1205
+
1206
+ min_count : int, default 0
1207
+ The required number of valid values to perform the operation. If fewer
1208
+ than ``min_count`` non-NA values are present the result will be NA.
1209
+
1210
+ Returns
1211
+ -------
1212
+ Series or DataFrame
1213
+ Computed prod of values within each group.
1214
+
1215
+ Examples
1216
+ --------
1217
+ >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
1218
+ ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
1219
+ >>> ser
1220
+ 2023-01-01 1
1221
+ 2023-01-15 2
1222
+ 2023-02-01 3
1223
+ 2023-02-15 4
1224
+ dtype: int64
1225
+ >>> ser.resample('MS').prod()
1226
+ 2023-01-01 2
1227
+ 2023-02-01 12
1228
+ Freq: MS, dtype: int64
1229
+ """
1230
+ maybe_warn_args_and_kwargs(type(self), "prod", args, kwargs)
1231
+ nv.validate_resampler_func("prod", args, kwargs)
1232
+ return self._downsample("prod", numeric_only=numeric_only, min_count=min_count)
1233
+
1234
+ @final
1235
+ def min(
1236
+ self,
1237
+ numeric_only: bool = False,
1238
+ min_count: int = 0,
1239
+ *args,
1240
+ **kwargs,
1241
+ ):
1242
+ """
1243
+ Compute min value of group.
1244
+
1245
+ Returns
1246
+ -------
1247
+ Series or DataFrame
1248
+
1249
+ Examples
1250
+ --------
1251
+ >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
1252
+ ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
1253
+ >>> ser
1254
+ 2023-01-01 1
1255
+ 2023-01-15 2
1256
+ 2023-02-01 3
1257
+ 2023-02-15 4
1258
+ dtype: int64
1259
+ >>> ser.resample('MS').min()
1260
+ 2023-01-01 1
1261
+ 2023-02-01 3
1262
+ Freq: MS, dtype: int64
1263
+ """
1264
+
1265
+ maybe_warn_args_and_kwargs(type(self), "min", args, kwargs)
1266
+ nv.validate_resampler_func("min", args, kwargs)
1267
+ return self._downsample("min", numeric_only=numeric_only, min_count=min_count)
1268
+
1269
+ @final
1270
+ def max(
1271
+ self,
1272
+ numeric_only: bool = False,
1273
+ min_count: int = 0,
1274
+ *args,
1275
+ **kwargs,
1276
+ ):
1277
+ """
1278
+ Compute max value of group.
1279
+
1280
+ Returns
1281
+ -------
1282
+ Series or DataFrame
1283
+
1284
+ Examples
1285
+ --------
1286
+ >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
1287
+ ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
1288
+ >>> ser
1289
+ 2023-01-01 1
1290
+ 2023-01-15 2
1291
+ 2023-02-01 3
1292
+ 2023-02-15 4
1293
+ dtype: int64
1294
+ >>> ser.resample('MS').max()
1295
+ 2023-01-01 2
1296
+ 2023-02-01 4
1297
+ Freq: MS, dtype: int64
1298
+ """
1299
+ maybe_warn_args_and_kwargs(type(self), "max", args, kwargs)
1300
+ nv.validate_resampler_func("max", args, kwargs)
1301
+ return self._downsample("max", numeric_only=numeric_only, min_count=min_count)
1302
+
1303
+ @final
1304
+ @doc(GroupBy.first)
1305
+ def first(
1306
+ self,
1307
+ numeric_only: bool = False,
1308
+ min_count: int = 0,
1309
+ skipna: bool = True,
1310
+ *args,
1311
+ **kwargs,
1312
+ ):
1313
+ maybe_warn_args_and_kwargs(type(self), "first", args, kwargs)
1314
+ nv.validate_resampler_func("first", args, kwargs)
1315
+ return self._downsample(
1316
+ "first", numeric_only=numeric_only, min_count=min_count, skipna=skipna
1317
+ )
1318
+
1319
+ @final
1320
+ @doc(GroupBy.last)
1321
+ def last(
1322
+ self,
1323
+ numeric_only: bool = False,
1324
+ min_count: int = 0,
1325
+ skipna: bool = True,
1326
+ *args,
1327
+ **kwargs,
1328
+ ):
1329
+ maybe_warn_args_and_kwargs(type(self), "last", args, kwargs)
1330
+ nv.validate_resampler_func("last", args, kwargs)
1331
+ return self._downsample(
1332
+ "last", numeric_only=numeric_only, min_count=min_count, skipna=skipna
1333
+ )
1334
+
1335
+ @final
1336
+ @doc(GroupBy.median)
1337
+ def median(self, numeric_only: bool = False, *args, **kwargs):
1338
+ maybe_warn_args_and_kwargs(type(self), "median", args, kwargs)
1339
+ nv.validate_resampler_func("median", args, kwargs)
1340
+ return self._downsample("median", numeric_only=numeric_only)
1341
+
1342
+ @final
1343
+ def mean(
1344
+ self,
1345
+ numeric_only: bool = False,
1346
+ *args,
1347
+ **kwargs,
1348
+ ):
1349
+ """
1350
+ Compute mean of groups, excluding missing values.
1351
+
1352
+ Parameters
1353
+ ----------
1354
+ numeric_only : bool, default False
1355
+ Include only `float`, `int` or `boolean` data.
1356
+
1357
+ .. versionchanged:: 2.0.0
1358
+
1359
+ numeric_only now defaults to ``False``.
1360
+
1361
+ Returns
1362
+ -------
1363
+ DataFrame or Series
1364
+ Mean of values within each group.
1365
+
1366
+ Examples
1367
+ --------
1368
+
1369
+ >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
1370
+ ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
1371
+ >>> ser
1372
+ 2023-01-01 1
1373
+ 2023-01-15 2
1374
+ 2023-02-01 3
1375
+ 2023-02-15 4
1376
+ dtype: int64
1377
+ >>> ser.resample('MS').mean()
1378
+ 2023-01-01 1.5
1379
+ 2023-02-01 3.5
1380
+ Freq: MS, dtype: float64
1381
+ """
1382
+ maybe_warn_args_and_kwargs(type(self), "mean", args, kwargs)
1383
+ nv.validate_resampler_func("mean", args, kwargs)
1384
+ return self._downsample("mean", numeric_only=numeric_only)
1385
+
1386
+ @final
1387
+ def std(
1388
+ self,
1389
+ ddof: int = 1,
1390
+ numeric_only: bool = False,
1391
+ *args,
1392
+ **kwargs,
1393
+ ):
1394
+ """
1395
+ Compute standard deviation of groups, excluding missing values.
1396
+
1397
+ Parameters
1398
+ ----------
1399
+ ddof : int, default 1
1400
+ Degrees of freedom.
1401
+ numeric_only : bool, default False
1402
+ Include only `float`, `int` or `boolean` data.
1403
+
1404
+ .. versionadded:: 1.5.0
1405
+
1406
+ .. versionchanged:: 2.0.0
1407
+
1408
+ numeric_only now defaults to ``False``.
1409
+
1410
+ Returns
1411
+ -------
1412
+ DataFrame or Series
1413
+ Standard deviation of values within each group.
1414
+
1415
+ Examples
1416
+ --------
1417
+
1418
+ >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
1419
+ ... index=pd.DatetimeIndex(['2023-01-01',
1420
+ ... '2023-01-10',
1421
+ ... '2023-01-15',
1422
+ ... '2023-02-01',
1423
+ ... '2023-02-10',
1424
+ ... '2023-02-15']))
1425
+ >>> ser.resample('MS').std()
1426
+ 2023-01-01 1.000000
1427
+ 2023-02-01 2.645751
1428
+ Freq: MS, dtype: float64
1429
+ """
1430
+ maybe_warn_args_and_kwargs(type(self), "std", args, kwargs)
1431
+ nv.validate_resampler_func("std", args, kwargs)
1432
+ return self._downsample("std", ddof=ddof, numeric_only=numeric_only)
1433
+
1434
+ @final
1435
+ def var(
1436
+ self,
1437
+ ddof: int = 1,
1438
+ numeric_only: bool = False,
1439
+ *args,
1440
+ **kwargs,
1441
+ ):
1442
+ """
1443
+ Compute variance of groups, excluding missing values.
1444
+
1445
+ Parameters
1446
+ ----------
1447
+ ddof : int, default 1
1448
+ Degrees of freedom.
1449
+
1450
+ numeric_only : bool, default False
1451
+ Include only `float`, `int` or `boolean` data.
1452
+
1453
+ .. versionadded:: 1.5.0
1454
+
1455
+ .. versionchanged:: 2.0.0
1456
+
1457
+ numeric_only now defaults to ``False``.
1458
+
1459
+ Returns
1460
+ -------
1461
+ DataFrame or Series
1462
+ Variance of values within each group.
1463
+
1464
+ Examples
1465
+ --------
1466
+
1467
+ >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
1468
+ ... index=pd.DatetimeIndex(['2023-01-01',
1469
+ ... '2023-01-10',
1470
+ ... '2023-01-15',
1471
+ ... '2023-02-01',
1472
+ ... '2023-02-10',
1473
+ ... '2023-02-15']))
1474
+ >>> ser.resample('MS').var()
1475
+ 2023-01-01 1.0
1476
+ 2023-02-01 7.0
1477
+ Freq: MS, dtype: float64
1478
+
1479
+ >>> ser.resample('MS').var(ddof=0)
1480
+ 2023-01-01 0.666667
1481
+ 2023-02-01 4.666667
1482
+ Freq: MS, dtype: float64
1483
+ """
1484
+ maybe_warn_args_and_kwargs(type(self), "var", args, kwargs)
1485
+ nv.validate_resampler_func("var", args, kwargs)
1486
+ return self._downsample("var", ddof=ddof, numeric_only=numeric_only)
1487
+
1488
+ @final
1489
+ @doc(GroupBy.sem)
1490
+ def sem(
1491
+ self,
1492
+ ddof: int = 1,
1493
+ numeric_only: bool = False,
1494
+ *args,
1495
+ **kwargs,
1496
+ ):
1497
+ maybe_warn_args_and_kwargs(type(self), "sem", args, kwargs)
1498
+ nv.validate_resampler_func("sem", args, kwargs)
1499
+ return self._downsample("sem", ddof=ddof, numeric_only=numeric_only)
1500
+
1501
+ @final
1502
+ @doc(GroupBy.ohlc)
1503
+ def ohlc(
1504
+ self,
1505
+ *args,
1506
+ **kwargs,
1507
+ ):
1508
+ maybe_warn_args_and_kwargs(type(self), "ohlc", args, kwargs)
1509
+ nv.validate_resampler_func("ohlc", args, kwargs)
1510
+
1511
+ ax = self.ax
1512
+ obj = self._obj_with_exclusions
1513
+ if len(ax) == 0:
1514
+ # GH#42902
1515
+ obj = obj.copy()
1516
+ obj.index = _asfreq_compat(obj.index, self.freq)
1517
+ if obj.ndim == 1:
1518
+ obj = obj.to_frame()
1519
+ obj = obj.reindex(["open", "high", "low", "close"], axis=1)
1520
+ else:
1521
+ mi = MultiIndex.from_product(
1522
+ [obj.columns, ["open", "high", "low", "close"]]
1523
+ )
1524
+ obj = obj.reindex(mi, axis=1)
1525
+ return obj
1526
+
1527
+ return self._downsample("ohlc")
1528
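A hedged usage sketch (toy data): for a Series, ``ohlc`` yields one row per bin with ``open``/``high``/``low``/``close`` columns; for a DataFrame the columns become a MultiIndex of (original column, field), mirroring the empty-input path handled above.
>>> import pandas as pd
>>> ser = pd.Series([1, 3, 2, 4, 3, 8],
...                 index=pd.date_range("2023-01-01", periods=6, freq="10D"))
>>> ser.resample("MS").ohlc()  # January: open 1, high 4, low 1, close 4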
+
1529
+ @final
1530
+ @doc(SeriesGroupBy.nunique)
1531
+ def nunique(
1532
+ self,
1533
+ *args,
1534
+ **kwargs,
1535
+ ):
1536
+ maybe_warn_args_and_kwargs(type(self), "nunique", args, kwargs)
1537
+ nv.validate_resampler_func("nunique", args, kwargs)
1538
+ return self._downsample("nunique")
1539
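A small sketch in the same spirit as the docstring examples above: ``nunique`` counts distinct values per bin and simply dispatches to ``_downsample``.
>>> import pandas as pd
>>> ser = pd.Series([1, 3, 3, 4, 3, 8],
...                 index=pd.date_range("2023-01-01", periods=6, freq="10D"))
>>> ser.resample("MS").nunique()  # January {1, 3, 4} -> 3, February {3, 8} -> 2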
+
1540
+ @final
1541
+ @doc(GroupBy.size)
1542
+ def size(self):
1543
+ result = self._downsample("size")
1544
+
1545
+ # If the result is a non-empty DataFrame we stack to get a Series
1546
+ # GH 46826
1547
+ if isinstance(result, ABCDataFrame) and not result.empty:
1548
+ result = result.stack(future_stack=True)
1549
+
1550
+ if not len(self.ax):
1551
+ from pandas import Series
1552
+
1553
+ if self._selected_obj.ndim == 1:
1554
+ name = self._selected_obj.name
1555
+ else:
1556
+ name = None
1557
+ result = Series([], index=result.index, dtype="int64", name=name)
1558
+ return result
1559
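A hedged illustration of the special-casing above (made-up data): ``size`` counts rows per bin, NaN included, and a non-empty DataFrame result is stacked into a Series, whereas ``count`` below excludes NaN.
>>> import numpy as np
>>> import pandas as pd
>>> ser = pd.Series([1.0, np.nan, 2.0, 4.0],
...                 index=pd.date_range("2023-01-01", periods=4, freq="15D"))
>>> ser.resample("MS").size()   # January -> 3, February -> 1 (NaN counted)
>>> ser.resample("MS").count()  # January -> 2, February -> 1 (NaN excluded)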
+
1560
+ @final
1561
+ @doc(GroupBy.count)
1562
+ def count(self):
1563
+ result = self._downsample("count")
1564
+ if not len(self.ax):
1565
+ if self._selected_obj.ndim == 1:
1566
+ result = type(self._selected_obj)(
1567
+ [], index=result.index, dtype="int64", name=self._selected_obj.name
1568
+ )
1569
+ else:
1570
+ from pandas import DataFrame
1571
+
1572
+ result = DataFrame(
1573
+ [], index=result.index, columns=result.columns, dtype="int64"
1574
+ )
1575
+
1576
+ return result
1577
+
1578
+ @final
1579
+ def quantile(self, q: float | list[float] | AnyArrayLike = 0.5, **kwargs):
1580
+ """
1581
+ Return value at the given quantile.
1582
+
1583
+ Parameters
1584
+ ----------
1585
+ q : float or array-like, default 0.5 (50% quantile)
1586
+
1587
+ Returns
1588
+ -------
1589
+ DataFrame or Series
1590
+ Quantile of values within each group.
1591
+
1592
+ See Also
1593
+ --------
1594
+ Series.quantile
1595
+ Return a series, where the index is q and the values are the quantiles.
1596
+ DataFrame.quantile
1597
+ Return a DataFrame, where the columns are the columns of self,
1598
+ and the values are the quantiles.
1599
+ DataFrameGroupBy.quantile
1600
+ Return a DataFrame, where the columns are groupby columns,
1601
+ and the values are its quantiles.
1602
+
1603
+ Examples
1604
+ --------
1605
+
1606
+ >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
1607
+ ... index=pd.DatetimeIndex(['2023-01-01',
1608
+ ... '2023-01-10',
1609
+ ... '2023-01-15',
1610
+ ... '2023-02-01',
1611
+ ... '2023-02-10',
1612
+ ... '2023-02-15']))
1613
+ >>> ser.resample('MS').quantile()
1614
+ 2023-01-01 2.0
1615
+ 2023-02-01 4.0
1616
+ Freq: MS, dtype: float64
1617
+
1618
+ >>> ser.resample('MS').quantile(.25)
1619
+ 2023-01-01 1.5
1620
+ 2023-02-01 3.5
1621
+ Freq: MS, dtype: float64
1622
+ """
1623
+ return self._downsample("quantile", q=q, **kwargs)
1624
+
1625
+
1626
+ class _GroupByMixin(PandasObject, SelectionMixin):
1627
+ """
1628
+ Provide the groupby facilities.
1629
+ """
1630
+
1631
+ _attributes: list[str] # in practice the same as Resampler._attributes
1632
+ _selection: IndexLabel | None = None
1633
+ _groupby: GroupBy
1634
+ _timegrouper: TimeGrouper
1635
+
1636
+ def __init__(
1637
+ self,
1638
+ *,
1639
+ parent: Resampler,
1640
+ groupby: GroupBy,
1641
+ key=None,
1642
+ selection: IndexLabel | None = None,
1643
+ include_groups: bool = False,
1644
+ ) -> None:
1645
+ # reached via ._gotitem and _get_resampler_for_grouping
1646
+
1647
+ assert isinstance(groupby, GroupBy), type(groupby)
1648
+
1649
+ # parent is always a Resampler, sometimes a _GroupByMixin
1650
+ assert isinstance(parent, Resampler), type(parent)
1651
+
1652
+ # initialize our GroupByMixin object with
1653
+ # the resampler attributes
1654
+ for attr in self._attributes:
1655
+ setattr(self, attr, getattr(parent, attr))
1656
+ self._selection = selection
1657
+
1658
+ self.binner = parent.binner
1659
+ self.key = key
1660
+
1661
+ self._groupby = groupby
1662
+ self._timegrouper = copy.copy(parent._timegrouper)
1663
+
1664
+ self.ax = parent.ax
1665
+ self.obj = parent.obj
1666
+ self.include_groups = include_groups
1667
+
1668
+ @no_type_check
1669
+ def _apply(self, f, *args, **kwargs):
1670
+ """
1671
+ Dispatch to _upsample; we are stripping all of the _upsample kwargs and
1672
+ performing the original function call on the grouped object.
1673
+ """
1674
+
1675
+ def func(x):
1676
+ x = self._resampler_cls(x, timegrouper=self._timegrouper, gpr_index=self.ax)
1677
+
1678
+ if isinstance(f, str):
1679
+ return getattr(x, f)(**kwargs)
1680
+
1681
+ return x.apply(f, *args, **kwargs)
1682
+
1683
+ result = _apply(self._groupby, func, include_groups=self.include_groups)
1684
+ return self._wrap_result(result)
1685
+
1686
+ _upsample = _apply
1687
+ _downsample = _apply
1688
+ _groupby_and_aggregate = _apply
1689
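This mixin backs the groupby-then-resample path; a hedged sketch of how it is reached through the public API (hypothetical frame):
>>> import pandas as pd
>>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]},
...                   index=pd.date_range("2023-01-01", periods=3, freq="12h"))
>>> rs = df.groupby("key").resample("D")  # a _GroupByMixin-based resampler
>>> rs.sum()  # each reduction funnels through the _apply dispatch above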
+
1690
+ @final
1691
+ def _gotitem(self, key, ndim, subset=None):
1692
+ """
1693
+ Sub-classes to define. Return a sliced object.
1694
+
1695
+ Parameters
1696
+ ----------
1697
+ key : string / list of selections
1698
+ ndim : {1, 2}
1699
+ requested ndim of result
1700
+ subset : object, default None
1701
+ subset to act on
1702
+ """
1703
+ # create a new object to prevent aliasing
1704
+ if subset is None:
1705
+ subset = self.obj
1706
+ if key is not None:
1707
+ subset = subset[key]
1708
+ else:
1709
+ # reached via Apply.agg_dict_like with selection=None, ndim=1
1710
+ assert subset.ndim == 1
1711
+
1712
+ # Try to select from a DataFrame, falling back to a Series
1713
+ try:
1714
+ if isinstance(key, list) and self.key not in key and self.key is not None:
1715
+ key.append(self.key)
1716
+ groupby = self._groupby[key]
1717
+ except IndexError:
1718
+ groupby = self._groupby
1719
+
1720
+ selection = self._infer_selection(key, subset)
1721
+
1722
+ new_rs = type(self)(
1723
+ groupby=groupby,
1724
+ parent=cast(Resampler, self),
1725
+ selection=selection,
1726
+ )
1727
+ return new_rs
1728
+
1729
+
1730
+ class DatetimeIndexResampler(Resampler):
1731
+ ax: DatetimeIndex
1732
+
1733
+ @property
1734
+ def _resampler_for_grouping(self):
1735
+ return DatetimeIndexResamplerGroupby
1736
+
1737
+ def _get_binner_for_time(self):
1738
+ # this is how we are actually creating the bins
1739
+ if self.kind == "period":
1740
+ return self._timegrouper._get_time_period_bins(self.ax)
1741
+ return self._timegrouper._get_time_bins(self.ax)
1742
+
1743
+ def _downsample(self, how, **kwargs):
1744
+ """
1745
+ Downsample by aggregating with the given (possibly cython-mapped) function.
1746
+
1747
+ Parameters
1748
+ ----------
1749
+ how : string / cython mapped function
1750
+ **kwargs : kw args passed to how function
1751
+ """
1752
+ orig_how = how
1753
+ how = com.get_cython_func(how) or how
1754
+ if orig_how != how:
1755
+ warn_alias_replacement(self, orig_how, how)
1756
+ ax = self.ax
1757
+
1758
+ # Excludes `on` column when provided
1759
+ obj = self._obj_with_exclusions
1760
+
1761
+ if not len(ax):
1762
+ # reset to the new freq
1763
+ obj = obj.copy()
1764
+ obj.index = obj.index._with_freq(self.freq)
1765
+ assert obj.index.freq == self.freq, (obj.index.freq, self.freq)
1766
+ return obj
1767
+
1768
+ # do we have a regular frequency
1769
+
1770
+ # error: Item "None" of "Optional[Any]" has no attribute "binlabels"
1771
+ if (
1772
+ (ax.freq is not None or ax.inferred_freq is not None)
1773
+ and len(self._grouper.binlabels) > len(ax)
1774
+ and how is None
1775
+ ):
1776
+ # let's do an asfreq
1777
+ return self.asfreq()
1778
+
1779
+ # we are downsampling
1780
+ # we want to call the actual grouper method here
1781
+ if self.axis == 0:
1782
+ result = obj.groupby(self._grouper).aggregate(how, **kwargs)
1783
+ else:
1784
+ # test_resample_axis1
1785
+ result = obj.T.groupby(self._grouper).aggregate(how, **kwargs).T
1786
+
1787
+ return self._wrap_result(result)
1788
+
1789
+ def _adjust_binner_for_upsample(self, binner):
1790
+ """
1791
+ Adjust our binner when upsampling.
1792
+
1793
+ The range of the new index should not extend outside the specified range.
1794
+ """
1795
+ if self.closed == "right":
1796
+ binner = binner[1:]
1797
+ else:
1798
+ binner = binner[:-1]
1799
+ return binner
1800
+
1801
+ def _upsample(self, method, limit: int | None = None, fill_value=None):
1802
+ """
1803
+ Parameters
1804
+ ----------
1805
+ method : {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'}
1806
+ Method for upsampling.
1807
+ limit : int, default None
1808
+ Maximum size gap to fill when reindexing.
1809
+ fill_value : scalar, default None
1810
+ Value to use for missing values.
1811
+
1812
+ See Also
1813
+ --------
1814
+ .fillna: Fill NA/NaN values using the specified method.
1815
+
1816
+ """
1817
+ if self.axis:
1818
+ raise AssertionError("axis must be 0")
1819
+ if self._from_selection:
1820
+ raise ValueError(
1821
+ "Upsampling from level= or on= selection "
1822
+ "is not supported, use .set_index(...) "
1823
+ "to explicitly set index to datetime-like"
1824
+ )
1825
+
1826
+ ax = self.ax
1827
+ obj = self._selected_obj
1828
+ binner = self.binner
1829
+ res_index = self._adjust_binner_for_upsample(binner)
1830
+
1831
+ # if we have the same frequency as our axis, then we are equal sampling
1832
+ if (
1833
+ limit is None
1834
+ and to_offset(ax.inferred_freq) == self.freq
1835
+ and len(obj) == len(res_index)
1836
+ ):
1837
+ result = obj.copy()
1838
+ result.index = res_index
1839
+ else:
1840
+ if method == "asfreq":
1841
+ method = None
1842
+ result = obj.reindex(
1843
+ res_index, method=method, limit=limit, fill_value=fill_value
1844
+ )
1845
+
1846
+ return self._wrap_result(result)
1847
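``_upsample`` is not called directly; it is reached from the public fill and asfreq methods. A hedged sketch of calls that exercise this path (toy data):
>>> import pandas as pd
>>> ser = pd.Series([1.0, 2.0],
...                 index=pd.date_range("2023-01-01", periods=2, freq="2D"))
>>> ser.resample("D").asfreq()              # method=None -> plain reindex onto the binner
>>> ser.resample("D").asfreq(fill_value=0)  # fill_value passed through to reindex
>>> ser.resample("D").ffill(limit=1)        # method='ffill' with a gap limit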
+
1848
+ def _wrap_result(self, result):
1849
+ result = super()._wrap_result(result)
1850
+
1851
+ # we may have a different kind than was asked for originally;
1852
+ # convert if needed
1853
+ if self.kind == "period" and not isinstance(result.index, PeriodIndex):
1854
+ if isinstance(result.index, MultiIndex):
1855
+ # GH 24103 - e.g. groupby resample
1856
+ if not isinstance(result.index.levels[-1], PeriodIndex):
1857
+ new_level = result.index.levels[-1].to_period(self.freq)
1858
+ result.index = result.index.set_levels(new_level, level=-1)
1859
+ else:
1860
+ result.index = result.index.to_period(self.freq)
1861
+ return result
1862
+
1863
+
1864
+ # error: Definition of "ax" in base class "_GroupByMixin" is incompatible
1865
+ # with definition in base class "DatetimeIndexResampler"
1866
+ class DatetimeIndexResamplerGroupby( # type: ignore[misc]
1867
+ _GroupByMixin, DatetimeIndexResampler
1868
+ ):
1869
+ """
1870
+ Provides a resample of a groupby implementation.
1871
+ """
1872
+
1873
+ @property
1874
+ def _resampler_cls(self):
1875
+ return DatetimeIndexResampler
1876
+
1877
+
1878
+ class PeriodIndexResampler(DatetimeIndexResampler):
1879
+ # error: Incompatible types in assignment (expression has type "PeriodIndex", base
1880
+ # class "DatetimeIndexResampler" defined the type as "DatetimeIndex")
1881
+ ax: PeriodIndex # type: ignore[assignment]
1882
+
1883
+ @property
1884
+ def _resampler_for_grouping(self):
1885
+ warnings.warn(
1886
+ "Resampling a groupby with a PeriodIndex is deprecated. "
1887
+ "Cast to DatetimeIndex before resampling instead.",
1888
+ FutureWarning,
1889
+ stacklevel=find_stack_level(),
1890
+ )
1891
+ return PeriodIndexResamplerGroupby
1892
+
1893
+ def _get_binner_for_time(self):
1894
+ if self.kind == "timestamp":
1895
+ return super()._get_binner_for_time()
1896
+ return self._timegrouper._get_period_bins(self.ax)
1897
+
1898
+ def _convert_obj(self, obj: NDFrameT) -> NDFrameT:
1899
+ obj = super()._convert_obj(obj)
1900
+
1901
+ if self._from_selection:
1902
+ # see GH 14008, GH 12871
1903
+ msg = (
1904
+ "Resampling from level= or on= selection "
1905
+ "with a PeriodIndex is not currently supported, "
1906
+ "use .set_index(...) to explicitly set index"
1907
+ )
1908
+ raise NotImplementedError(msg)
1909
+
1910
+ # convert to timestamp
1911
+ if self.kind == "timestamp":
1912
+ obj = obj.to_timestamp(how=self.convention)
1913
+
1914
+ return obj
1915
+
1916
+ def _downsample(self, how, **kwargs):
1917
+ """
1918
+ Downsample by aggregating with the given (possibly cython-mapped) function.
1919
+
1920
+ Parameters
1921
+ ----------
1922
+ how : string / cython mapped function
1923
+ **kwargs : kw args passed to how function
1924
+ """
1925
+ # we may need to actually resample as if we are timestamps
1926
+ if self.kind == "timestamp":
1927
+ return super()._downsample(how, **kwargs)
1928
+
1929
+ orig_how = how
1930
+ how = com.get_cython_func(how) or how
1931
+ if orig_how != how:
1932
+ warn_alias_replacement(self, orig_how, how)
1933
+ ax = self.ax
1934
+
1935
+ if is_subperiod(ax.freq, self.freq):
1936
+ # Downsampling
1937
+ return self._groupby_and_aggregate(how, **kwargs)
1938
+ elif is_superperiod(ax.freq, self.freq):
1939
+ if how == "ohlc":
1940
+ # GH #13083
1941
+ # upsampling to subperiods is handled as an asfreq, which works
1942
+ # for pure aggregating/reducing methods
1943
+ # OHLC reduces along the time dimension, but creates multiple
1944
+ # values for each period -> handle by _groupby_and_aggregate()
1945
+ return self._groupby_and_aggregate(how)
1946
+ return self.asfreq()
1947
+ elif ax.freq == self.freq:
1948
+ return self.asfreq()
1949
+
1950
+ raise IncompatibleFrequency(
1951
+ f"Frequency {ax.freq} cannot be resampled to {self.freq}, "
1952
+ "as they are not sub or super periods"
1953
+ )
1954
+
1955
+ def _upsample(self, method, limit: int | None = None, fill_value=None):
1956
+ """
1957
+ Parameters
1958
+ ----------
1959
+ method : {'backfill', 'bfill', 'pad', 'ffill'}
1960
+ Method for upsampling.
1961
+ limit : int, default None
1962
+ Maximum size gap to fill when reindexing.
1963
+ fill_value : scalar, default None
1964
+ Value to use for missing values.
1965
+
1966
+ See Also
1967
+ --------
1968
+ .fillna: Fill NA/NaN values using the specified method.
1969
+
1970
+ """
1971
+ # we may need to actually resample as if we are timestamps
1972
+ if self.kind == "timestamp":
1973
+ return super()._upsample(method, limit=limit, fill_value=fill_value)
1974
+
1975
+ ax = self.ax
1976
+ obj = self.obj
1977
+ new_index = self.binner
1978
+
1979
+ # Start vs. end of period
1980
+ memb = ax.asfreq(self.freq, how=self.convention)
1981
+
1982
+ # Get the fill indexer
1983
+ if method == "asfreq":
1984
+ method = None
1985
+ indexer = memb.get_indexer(new_index, method=method, limit=limit)
1986
+ new_obj = _take_new_index(
1987
+ obj,
1988
+ indexer,
1989
+ new_index,
1990
+ axis=self.axis,
1991
+ )
1992
+ return self._wrap_result(new_obj)
1993
+
1994
+
1995
+ # error: Definition of "ax" in base class "_GroupByMixin" is incompatible with
1996
+ # definition in base class "PeriodIndexResampler"
1997
+ class PeriodIndexResamplerGroupby( # type: ignore[misc]
1998
+ _GroupByMixin, PeriodIndexResampler
1999
+ ):
2000
+ """
2001
+ Provides a resample of a groupby implementation.
2002
+ """
2003
+
2004
+ @property
2005
+ def _resampler_cls(self):
2006
+ return PeriodIndexResampler
2007
+
2008
+
2009
+ class TimedeltaIndexResampler(DatetimeIndexResampler):
2010
+ # error: Incompatible types in assignment (expression has type "TimedeltaIndex",
2011
+ # base class "DatetimeIndexResampler" defined the type as "DatetimeIndex")
2012
+ ax: TimedeltaIndex # type: ignore[assignment]
2013
+
2014
+ @property
2015
+ def _resampler_for_grouping(self):
2016
+ return TimedeltaIndexResamplerGroupby
2017
+
2018
+ def _get_binner_for_time(self):
2019
+ return self._timegrouper._get_time_delta_bins(self.ax)
2020
+
2021
+ def _adjust_binner_for_upsample(self, binner):
2022
+ """
2023
+ Adjust our binner when upsampling.
2024
+
2025
+ The range of the new index is allowed to be greater than the original range,
2026
+ so we don't need to change the length of the binner, GH 13022.
2027
+ """
2028
+ return binner
2029
+
2030
+
2031
+ # error: Definition of "ax" in base class "_GroupByMixin" is incompatible with
2032
+ # definition in base class "DatetimeIndexResampler"
2033
+ class TimedeltaIndexResamplerGroupby( # type: ignore[misc]
2034
+ _GroupByMixin, TimedeltaIndexResampler
2035
+ ):
2036
+ """
2037
+ Provides a resample of a groupby implementation.
2038
+ """
2039
+
2040
+ @property
2041
+ def _resampler_cls(self):
2042
+ return TimedeltaIndexResampler
2043
+
2044
+
2045
+ def get_resampler(obj: Series | DataFrame, kind=None, **kwds) -> Resampler:
2046
+ """
2047
+ Create a TimeGrouper and return our resampler.
2048
+ """
2049
+ tg = TimeGrouper(obj, **kwds) # type: ignore[arg-type]
2050
+ return tg._get_resampler(obj, kind=kind)
2051
+
2052
+
2053
+ get_resampler.__doc__ = Resampler.__doc__
2054
+
2055
+
2056
+ def get_resampler_for_grouping(
2057
+ groupby: GroupBy,
2058
+ rule,
2059
+ how=None,
2060
+ fill_method=None,
2061
+ limit: int | None = None,
2062
+ kind=None,
2063
+ on=None,
2064
+ include_groups: bool = True,
2065
+ **kwargs,
2066
+ ) -> Resampler:
2067
+ """
2068
+ Return our appropriate resampler when grouping as well.
2069
+ """
2070
+ # .resample uses 'on' similar to how .groupby uses 'key'
2071
+ tg = TimeGrouper(freq=rule, key=on, **kwargs)
2072
+ resampler = tg._get_resampler(groupby.obj, kind=kind)
2073
+ return resampler._get_resampler_for_grouping(
2074
+ groupby=groupby, include_groups=include_groups, key=tg.key
2075
+ )
2076
+
2077
+
2078
+ class TimeGrouper(Grouper):
2079
+ """
2080
+ Custom groupby class for time-interval grouping.
2081
+
2082
+ Parameters
2083
+ ----------
2084
+ freq : pandas date offset or offset alias for identifying bin edges
2085
+ closed : closed end of interval; 'left' or 'right'
2086
+ label : interval boundary to use for labeling; 'left' or 'right'
2087
+ convention : {'start', 'end', 'e', 's'}
2088
+ Used only if the axis is a PeriodIndex.
2089
+ """
2090
+
2091
+ _attributes = Grouper._attributes + (
2092
+ "closed",
2093
+ "label",
2094
+ "how",
2095
+ "kind",
2096
+ "convention",
2097
+ "origin",
2098
+ "offset",
2099
+ )
2100
+
2101
+ origin: TimeGrouperOrigin
2102
+
2103
+ def __init__(
2104
+ self,
2105
+ obj: Grouper | None = None,
2106
+ freq: Frequency = "Min",
2107
+ key: str | None = None,
2108
+ closed: Literal["left", "right"] | None = None,
2109
+ label: Literal["left", "right"] | None = None,
2110
+ how: str = "mean",
2111
+ axis: Axis = 0,
2112
+ fill_method=None,
2113
+ limit: int | None = None,
2114
+ kind: str | None = None,
2115
+ convention: Literal["start", "end", "e", "s"] | None = None,
2116
+ origin: Literal["epoch", "start", "start_day", "end", "end_day"]
2117
+ | TimestampConvertibleTypes = "start_day",
2118
+ offset: TimedeltaConvertibleTypes | None = None,
2119
+ group_keys: bool = False,
2120
+ **kwargs,
2121
+ ) -> None:
2122
+ # Check for correctness of the keyword arguments which would
2123
+ # otherwise silently use the default if misspelled
2124
+ if label not in {None, "left", "right"}:
2125
+ raise ValueError(f"Unsupported value {label} for `label`")
2126
+ if closed not in {None, "left", "right"}:
2127
+ raise ValueError(f"Unsupported value {closed} for `closed`")
2128
+ if convention not in {None, "start", "end", "e", "s"}:
2129
+ raise ValueError(f"Unsupported value {convention} for `convention`")
2130
+
2131
+ if (
2132
+ key is None
2133
+ and obj is not None
2134
+ and isinstance(obj.index, PeriodIndex) # type: ignore[attr-defined]
2135
+ or (
2136
+ key is not None
2137
+ and obj is not None
2138
+ and getattr(obj[key], "dtype", None) == "period" # type: ignore[index]
2139
+ )
2140
+ ):
2141
+ freq = to_offset(freq, is_period=True)
2142
+ else:
2143
+ freq = to_offset(freq)
2144
+
2145
+ end_types = {"ME", "YE", "QE", "BME", "BYE", "BQE", "W"}
2146
+ rule = freq.rule_code
2147
+ if rule in end_types or ("-" in rule and rule[: rule.find("-")] in end_types):
2148
+ if closed is None:
2149
+ closed = "right"
2150
+ if label is None:
2151
+ label = "right"
2152
+ else:
2153
+ # The backward resample sets ``closed`` to ``'right'`` by default
2154
+ # since the last value should be considered as the edge point for
2155
+ # the last bin. When origin in "end" or "end_day", the value for a
2156
+ # specific ``Timestamp`` index stands for the resample result from
2157
+ # the current ``Timestamp`` minus ``freq`` to the current
2158
+ # ``Timestamp`` with a right close.
2159
+ if origin in ["end", "end_day"]:
2160
+ if closed is None:
2161
+ closed = "right"
2162
+ if label is None:
2163
+ label = "right"
2164
+ else:
2165
+ if closed is None:
2166
+ closed = "left"
2167
+ if label is None:
2168
+ label = "left"
2169
+
2170
+ self.closed = closed
2171
+ self.label = label
2172
+ self.kind = kind
2173
+ self.convention = convention if convention is not None else "e"
2174
+ self.how = how
2175
+ self.fill_method = fill_method
2176
+ self.limit = limit
2177
+ self.group_keys = group_keys
2178
+ self._arrow_dtype: ArrowDtype | None = None
2179
+
2180
+ if origin in ("epoch", "start", "start_day", "end", "end_day"):
2181
+ # error: Incompatible types in assignment (expression has type "Union[Union[
2182
+ # Timestamp, datetime, datetime64, signedinteger[_64Bit], float, str],
2183
+ # Literal['epoch', 'start', 'start_day', 'end', 'end_day']]", variable has
2184
+ # type "Union[Timestamp, Literal['epoch', 'start', 'start_day', 'end',
2185
+ # 'end_day']]")
2186
+ self.origin = origin # type: ignore[assignment]
2187
+ else:
2188
+ try:
2189
+ self.origin = Timestamp(origin)
2190
+ except (ValueError, TypeError) as err:
2191
+ raise ValueError(
2192
+ "'origin' should be equal to 'epoch', 'start', 'start_day', "
2193
+ "'end', 'end_day' or "
2194
+ f"should be a Timestamp convertible type. Got '{origin}' instead."
2195
+ ) from err
2196
+
2197
+ try:
2198
+ self.offset = Timedelta(offset) if offset is not None else None
2199
+ except (ValueError, TypeError) as err:
2200
+ raise ValueError(
2201
+ "'offset' should be a Timedelta convertible type. "
2202
+ f"Got '{offset}' instead."
2203
+ ) from err
2204
+
2205
+ # always sort time groupers
2206
+ kwargs["sort"] = True
2207
+
2208
+ super().__init__(freq=freq, key=key, axis=axis, **kwargs)
2209
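A hedged illustration of the defaulting logic above: end-anchored aliases such as 'ME', 'QE', 'YE' or 'W' default to closed='right' and label='right', while other frequencies default to 'left'/'left' unless origin is 'end' or 'end_day'.
>>> import pandas as pd
>>> ser = pd.Series(range(6), index=pd.date_range("2023-01-01", periods=6, freq="10D"))
>>> ser.resample("ME").sum()   # bins closed and labelled on the right by default
>>> ser.resample("20D").sum()  # bins closed and labelled on the left by default
>>> ser.resample("20D", closed="right", label="right").sum()  # explicit override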
+
2210
+ def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
2211
+ """
2212
+ Return my resampler or raise if we have an invalid axis.
2213
+
2214
+ Parameters
2215
+ ----------
2216
+ obj : Series or DataFrame
2217
+ kind : string, optional
2218
+ 'period', 'timestamp', 'timedelta' are valid
2219
+
2220
+ Returns
2221
+ -------
2222
+ Resampler
2223
+
2224
+ Raises
2225
+ ------
2226
+ TypeError if incompatible axis
2227
+
2228
+ """
2229
+ _, ax, _ = self._set_grouper(obj, gpr_index=None)
2230
+ if isinstance(ax, DatetimeIndex):
2231
+ return DatetimeIndexResampler(
2232
+ obj,
2233
+ timegrouper=self,
2234
+ kind=kind,
2235
+ axis=self.axis,
2236
+ group_keys=self.group_keys,
2237
+ gpr_index=ax,
2238
+ )
2239
+ elif isinstance(ax, PeriodIndex) or kind == "period":
2240
+ if isinstance(ax, PeriodIndex):
2241
+ # GH#53481
2242
+ warnings.warn(
2243
+ "Resampling with a PeriodIndex is deprecated. "
2244
+ "Cast index to DatetimeIndex before resampling instead.",
2245
+ FutureWarning,
2246
+ stacklevel=find_stack_level(),
2247
+ )
2248
+ else:
2249
+ warnings.warn(
2250
+ "Resampling with kind='period' is deprecated. "
2251
+ "Use datetime paths instead.",
2252
+ FutureWarning,
2253
+ stacklevel=find_stack_level(),
2254
+ )
2255
+ return PeriodIndexResampler(
2256
+ obj,
2257
+ timegrouper=self,
2258
+ kind=kind,
2259
+ axis=self.axis,
2260
+ group_keys=self.group_keys,
2261
+ gpr_index=ax,
2262
+ )
2263
+ elif isinstance(ax, TimedeltaIndex):
2264
+ return TimedeltaIndexResampler(
2265
+ obj,
2266
+ timegrouper=self,
2267
+ axis=self.axis,
2268
+ group_keys=self.group_keys,
2269
+ gpr_index=ax,
2270
+ )
2271
+
2272
+ raise TypeError(
2273
+ "Only valid with DatetimeIndex, "
2274
+ "TimedeltaIndex or PeriodIndex, "
2275
+ f"but got an instance of '{type(ax).__name__}'"
2276
+ )
2277
+
2278
+ def _get_grouper(
2279
+ self, obj: NDFrameT, validate: bool = True
2280
+ ) -> tuple[BinGrouper, NDFrameT]:
2281
+ # create the resampler and return our binner
2282
+ r = self._get_resampler(obj)
2283
+ return r._grouper, cast(NDFrameT, r.obj)
2284
+
2285
+ def _get_time_bins(self, ax: DatetimeIndex):
2286
+ if not isinstance(ax, DatetimeIndex):
2287
+ raise TypeError(
2288
+ "axis must be a DatetimeIndex, but got "
2289
+ f"an instance of {type(ax).__name__}"
2290
+ )
2291
+
2292
+ if len(ax) == 0:
2293
+ binner = labels = DatetimeIndex(
2294
+ data=[], freq=self.freq, name=ax.name, dtype=ax.dtype
2295
+ )
2296
+ return binner, [], labels
2297
+
2298
+ first, last = _get_timestamp_range_edges(
2299
+ ax.min(),
2300
+ ax.max(),
2301
+ self.freq,
2302
+ unit=ax.unit,
2303
+ closed=self.closed,
2304
+ origin=self.origin,
2305
+ offset=self.offset,
2306
+ )
2307
+ # GH #12037
2308
+ # use first/last directly instead of calling replace() on them
2309
+ # because replace() will swallow the nanosecond part;
2310
+ # thus the last bin may end slightly before the end if the end contains a
2311
+ # nanosecond part, leading to a `Values falls after last bin` error
2312
+ # GH 25758: If DST lands at midnight (e.g. 'America/Havana'), user feedback
2313
+ # has noted that ambiguous=True provides the most sensible result
2314
+ binner = labels = date_range(
2315
+ freq=self.freq,
2316
+ start=first,
2317
+ end=last,
2318
+ tz=ax.tz,
2319
+ name=ax.name,
2320
+ ambiguous=True,
2321
+ nonexistent="shift_forward",
2322
+ unit=ax.unit,
2323
+ )
2324
+
2325
+ ax_values = ax.asi8
2326
+ binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
2327
+
2328
+ # general version, knowing nothing about relative frequencies
2329
+ bins = lib.generate_bins_dt64(
2330
+ ax_values, bin_edges, self.closed, hasnans=ax.hasnans
2331
+ )
2332
+
2333
+ if self.closed == "right":
2334
+ labels = binner
2335
+ if self.label == "right":
2336
+ labels = labels[1:]
2337
+ elif self.label == "right":
2338
+ labels = labels[1:]
2339
+
2340
+ if ax.hasnans:
2341
+ binner = binner.insert(0, NaT)
2342
+ labels = labels.insert(0, NaT)
2343
+
2344
+ # if we end up with more labels than bins
2345
+ # adjust the labels
2346
+ # GH4076
2347
+ if len(bins) < len(labels):
2348
+ labels = labels[: len(bins)]
2349
+
2350
+ return binner, bins, labels
2351
+
2352
+ def _adjust_bin_edges(
2353
+ self, binner: DatetimeIndex, ax_values: npt.NDArray[np.int64]
2354
+ ) -> tuple[DatetimeIndex, npt.NDArray[np.int64]]:
2355
+ # Some hacks for > daily data, see #1471, #1458, #1483
2356
+
2357
+ if self.freq.name in ("BME", "ME", "W") or self.freq.name.split("-")[0] in (
2358
+ "BQE",
2359
+ "BYE",
2360
+ "QE",
2361
+ "YE",
2362
+ "W",
2363
+ ):
2364
+ # If the right end-point is on the last day of the month, roll forwards
2365
+ # until the last moment of that day. Note that we only do this for offsets
2366
+ # which correspond to the end of a super-daily period - "month start", for
2367
+ # example, is excluded.
2368
+ if self.closed == "right":
2369
+ # GH 21459, GH 9119: Adjust the bins relative to the wall time
2370
+ edges_dti = binner.tz_localize(None)
2371
+ edges_dti = (
2372
+ edges_dti
2373
+ + Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit)
2374
+ - Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit)
2375
+ )
2376
+ bin_edges = edges_dti.tz_localize(binner.tz).asi8
2377
+ else:
2378
+ bin_edges = binner.asi8
2379
+
2380
+ # intraday values on last day
2381
+ if bin_edges[-2] > ax_values.max():
2382
+ bin_edges = bin_edges[:-1]
2383
+ binner = binner[:-1]
2384
+ else:
2385
+ bin_edges = binner.asi8
2386
+ return binner, bin_edges
2387
+
2388
+ def _get_time_delta_bins(self, ax: TimedeltaIndex):
2389
+ if not isinstance(ax, TimedeltaIndex):
2390
+ raise TypeError(
2391
+ "axis must be a TimedeltaIndex, but got "
2392
+ f"an instance of {type(ax).__name__}"
2393
+ )
2394
+
2395
+ if not isinstance(self.freq, Tick):
2396
+ # GH#51896
2397
+ raise ValueError(
2398
+ "Resampling on a TimedeltaIndex requires fixed-duration `freq`, "
2399
+ f"e.g. '24h' or '3D', not {self.freq}"
2400
+ )
2401
+
2402
+ if not len(ax):
2403
+ binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)
2404
+ return binner, [], labels
2405
+
2406
+ start, end = ax.min(), ax.max()
2407
+
2408
+ if self.closed == "right":
2409
+ end += self.freq
2410
+
2411
+ labels = binner = timedelta_range(
2412
+ start=start, end=end, freq=self.freq, name=ax.name
2413
+ )
2414
+
2415
+ end_stamps = labels
2416
+ if self.closed == "left":
2417
+ end_stamps += self.freq
2418
+
2419
+ bins = ax.searchsorted(end_stamps, side=self.closed)
2420
+
2421
+ if self.offset:
2422
+ # GH 10530 & 31809
2423
+ labels += self.offset
2424
+
2425
+ return binner, bins, labels
2426
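A sketch of the constraint enforced above (GH#51896): resampling a TimedeltaIndex only accepts a fixed-duration (Tick) frequency.
>>> import pandas as pd
>>> ser = pd.Series(range(4), index=pd.timedelta_range("0h", periods=4, freq="90min"))
>>> ser.resample("3h").sum()  # fine: '3h' is a Tick
>>> ser.resample("ME").sum()  # raises ValueError: requires fixed-duration `freq`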
+
2427
+ def _get_time_period_bins(self, ax: DatetimeIndex):
2428
+ if not isinstance(ax, DatetimeIndex):
2429
+ raise TypeError(
2430
+ "axis must be a DatetimeIndex, but got "
2431
+ f"an instance of {type(ax).__name__}"
2432
+ )
2433
+
2434
+ freq = self.freq
2435
+
2436
+ if len(ax) == 0:
2437
+ binner = labels = PeriodIndex(
2438
+ data=[], freq=freq, name=ax.name, dtype=ax.dtype
2439
+ )
2440
+ return binner, [], labels
2441
+
2442
+ labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)
2443
+
2444
+ end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp()
2445
+ if ax.tz:
2446
+ end_stamps = end_stamps.tz_localize(ax.tz)
2447
+ bins = ax.searchsorted(end_stamps, side="left")
2448
+
2449
+ return binner, bins, labels
2450
+
2451
+ def _get_period_bins(self, ax: PeriodIndex):
2452
+ if not isinstance(ax, PeriodIndex):
2453
+ raise TypeError(
2454
+ "axis must be a PeriodIndex, but got "
2455
+ f"an instance of {type(ax).__name__}"
2456
+ )
2457
+
2458
+ memb = ax.asfreq(self.freq, how=self.convention)
2459
+
2460
+ # NaT handling as in pandas._lib.lib.generate_bins_dt64()
2461
+ nat_count = 0
2462
+ if memb.hasnans:
2463
+ # error: Incompatible types in assignment (expression has type
2464
+ # "bool_", variable has type "int") [assignment]
2465
+ nat_count = np.sum(memb._isnan) # type: ignore[assignment]
2466
+ memb = memb[~memb._isnan]
2467
+
2468
+ if not len(memb):
2469
+ # index contains no valid (non-NaT) values
2470
+ bins = np.array([], dtype=np.int64)
2471
+ binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
2472
+ if len(ax) > 0:
2473
+ # index is all NaT
2474
+ binner, bins, labels = _insert_nat_bin(binner, bins, labels, len(ax))
2475
+ return binner, bins, labels
2476
+
2477
+ freq_mult = self.freq.n
2478
+
2479
+ start = ax.min().asfreq(self.freq, how=self.convention)
2480
+ end = ax.max().asfreq(self.freq, how="end")
2481
+ bin_shift = 0
2482
+
2483
+ if isinstance(self.freq, Tick):
2484
+ # GH 23882 & 31809: get adjusted bin edge labels with 'origin'
2485
+ # and 'offset' support. This call only makes sense if the freq is a
2486
+ # Tick since offset and origin are only used in those cases.
2487
+ # Not doing this check could create an extra empty bin.
2488
+ p_start, end = _get_period_range_edges(
2489
+ start,
2490
+ end,
2491
+ self.freq,
2492
+ closed=self.closed,
2493
+ origin=self.origin,
2494
+ offset=self.offset,
2495
+ )
2496
+
2497
+ # Get offset for bin edge (not label edge) adjustment
2498
+ start_offset = Period(start, self.freq) - Period(p_start, self.freq)
2499
+ # error: Item "Period" of "Union[Period, Any]" has no attribute "n"
2500
+ bin_shift = start_offset.n % freq_mult # type: ignore[union-attr]
2501
+ start = p_start
2502
+
2503
+ labels = binner = period_range(
2504
+ start=start, end=end, freq=self.freq, name=ax.name
2505
+ )
2506
+
2507
+ i8 = memb.asi8
2508
+
2509
+ # when upsampling to subperiods, we need to generate enough bins
2510
+ expected_bins_count = len(binner) * freq_mult
2511
+ i8_extend = expected_bins_count - (i8[-1] - i8[0])
2512
+ rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)
2513
+ rng += freq_mult
2514
+ # adjust bin edge indexes to account for base
2515
+ rng -= bin_shift
2516
+
2517
+ # Wrap in PeriodArray for PeriodArray.searchsorted
2518
+ prng = type(memb._data)(rng, dtype=memb.dtype)
2519
+ bins = memb.searchsorted(prng, side="left")
2520
+
2521
+ if nat_count > 0:
2522
+ binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count)
2523
+
2524
+ return binner, bins, labels
2525
+
2526
+ def _set_grouper(
2527
+ self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None
2528
+ ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:
2529
+ obj, ax, indexer = super()._set_grouper(obj, sort, gpr_index=gpr_index)
2530
+ if isinstance(ax.dtype, ArrowDtype) and ax.dtype.kind in "Mm":
2531
+ self._arrow_dtype = ax.dtype
2532
+ ax = Index(
2533
+ cast(ArrowExtensionArray, ax.array)._maybe_convert_datelike_array()
2534
+ )
2535
+ return obj, ax, indexer
2536
+
2537
+
2538
+ def _take_new_index(
2539
+ obj: NDFrameT, indexer: npt.NDArray[np.intp], new_index: Index, axis: AxisInt = 0
2540
+ ) -> NDFrameT:
2541
+ if isinstance(obj, ABCSeries):
2542
+ new_values = algos.take_nd(obj._values, indexer)
2543
+ # error: Incompatible return value type (got "Series", expected "NDFrameT")
2544
+ return obj._constructor( # type: ignore[return-value]
2545
+ new_values, index=new_index, name=obj.name
2546
+ )
2547
+ elif isinstance(obj, ABCDataFrame):
2548
+ if axis == 1:
2549
+ raise NotImplementedError("axis 1 is not supported")
2550
+ new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1)
2551
+ # error: Incompatible return value type (got "DataFrame", expected "NDFrameT")
2552
+ return obj._constructor_from_mgr(new_mgr, axes=new_mgr.axes) # type: ignore[return-value]
2553
+ else:
2554
+ raise ValueError("'obj' should be either a Series or a DataFrame")
2555
+
2556
+
2557
+ def _get_timestamp_range_edges(
2558
+ first: Timestamp,
2559
+ last: Timestamp,
2560
+ freq: BaseOffset,
2561
+ unit: str,
2562
+ closed: Literal["right", "left"] = "left",
2563
+ origin: TimeGrouperOrigin = "start_day",
2564
+ offset: Timedelta | None = None,
2565
+ ) -> tuple[Timestamp, Timestamp]:
2566
+ """
2567
+ Adjust the `first` Timestamp to the preceding Timestamp that resides on
2568
+ the provided offset. Adjust the `last` Timestamp to the following
2569
+ Timestamp that resides on the provided offset. Input Timestamps that
2570
+ already reside on the offset will be adjusted depending on the type of
2571
+ offset and the `closed` parameter.
2572
+
2573
+ Parameters
2574
+ ----------
2575
+ first : pd.Timestamp
2576
+ The beginning Timestamp of the range to be adjusted.
2577
+ last : pd.Timestamp
2578
+ The ending Timestamp of the range to be adjusted.
2579
+ freq : pd.DateOffset
2580
+ The dateoffset to which the Timestamps will be adjusted.
2581
+ closed : {'right', 'left'}, default "left"
2582
+ Which side of bin interval is closed.
2583
+ origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day'
2584
+ The timestamp on which to adjust the grouping. The timezone of origin must
2585
+ match the timezone of the index.
2586
+ If a timestamp is not used, these values are also supported:
2587
+
2588
+ - 'epoch': `origin` is 1970-01-01
2589
+ - 'start': `origin` is the first value of the timeseries
2590
+ - 'start_day': `origin` is the first day at midnight of the timeseries
2591
+ offset : pd.Timedelta, default is None
2592
+ An offset timedelta added to the origin.
2593
+
2594
+ Returns
2595
+ -------
2596
+ A tuple of length 2, containing the adjusted pd.Timestamp objects.
2597
+ """
2598
+ if isinstance(freq, Tick):
2599
+ index_tz = first.tz
2600
+ if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):
2601
+ raise ValueError("The origin must have the same timezone as the index.")
2602
+ if origin == "epoch":
2603
+ # set the epoch based on the timezone to have similar bins results when
2604
+ # resampling on the same kind of indexes on different timezones
2605
+ origin = Timestamp("1970-01-01", tz=index_tz)
2606
+
2607
+ if isinstance(freq, Day):
2608
+ # _adjust_dates_anchored assumes 'D' means 24h, but first/last
2609
+ # might contain a DST transition (23h, 24h, or 25h).
2610
+ # So "pretend" the dates are naive when adjusting the endpoints
2611
+ first = first.tz_localize(None)
2612
+ last = last.tz_localize(None)
2613
+ if isinstance(origin, Timestamp):
2614
+ origin = origin.tz_localize(None)
2615
+
2616
+ first, last = _adjust_dates_anchored(
2617
+ first, last, freq, closed=closed, origin=origin, offset=offset, unit=unit
2618
+ )
2619
+ if isinstance(freq, Day):
2620
+ first = first.tz_localize(index_tz)
2621
+ last = last.tz_localize(index_tz)
2622
+ else:
2623
+ first = first.normalize()
2624
+ last = last.normalize()
2625
+
2626
+ if closed == "left":
2627
+ first = Timestamp(freq.rollback(first))
2628
+ else:
2629
+ first = Timestamp(first - freq)
2630
+
2631
+ last = Timestamp(last + freq)
2632
+
2633
+ return first, last
2634
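The public knobs that exercise this helper are ``origin`` and ``offset`` on ``resample``; a hedged sketch (illustrative values) of how they move the bin edges:
>>> import pandas as pd
>>> ser = pd.Series(range(5),
...                 index=pd.date_range("2023-01-01 00:13", periods=5, freq="17min"))
>>> ser.resample("15min").sum()                  # origin='start_day': edges anchored at midnight
>>> ser.resample("15min", origin="start").sum()  # edges anchored at the first timestamp (00:13)
>>> ser.resample("15min", offset="2min").sum()   # edges shifted by 2 minutes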
+
2635
+
2636
+ def _get_period_range_edges(
2637
+ first: Period,
2638
+ last: Period,
2639
+ freq: BaseOffset,
2640
+ closed: Literal["right", "left"] = "left",
2641
+ origin: TimeGrouperOrigin = "start_day",
2642
+ offset: Timedelta | None = None,
2643
+ ) -> tuple[Period, Period]:
2644
+ """
2645
+ Adjust the provided `first` and `last` Periods to the respective Period of
2646
+ the given offset that encompasses them.
2647
+
2648
+ Parameters
2649
+ ----------
2650
+ first : pd.Period
2651
+ The beginning Period of the range to be adjusted.
2652
+ last : pd.Period
2653
+ The ending Period of the range to be adjusted.
2654
+ freq : pd.DateOffset
2655
+ The freq to which the Periods will be adjusted.
2656
+ closed : {'right', 'left'}, default "left"
2657
+ Which side of bin interval is closed.
2658
+ origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day'
2659
+ The timestamp on which to adjust the grouping. The timezone of origin must
2660
+ match the timezone of the index.
2661
+
2662
+ If a timestamp is not used, these values are also supported:
2663
+
2664
+ - 'epoch': `origin` is 1970-01-01
2665
+ - 'start': `origin` is the first value of the timeseries
2666
+ - 'start_day': `origin` is the first day at midnight of the timeseries
2667
+ offset : pd.Timedelta, default is None
2668
+ An offset timedelta added to the origin.
2669
+
2670
+ Returns
2671
+ -------
2672
+ A tuple of length 2, containing the adjusted pd.Period objects.
2673
+ """
2674
+ if not all(isinstance(obj, Period) for obj in [first, last]):
2675
+ raise TypeError("'first' and 'last' must be instances of type Period")
2676
+
2677
+ # GH 23882
2678
+ first_ts = first.to_timestamp()
2679
+ last_ts = last.to_timestamp()
2680
+ adjust_first = not freq.is_on_offset(first_ts)
2681
+ adjust_last = freq.is_on_offset(last_ts)
2682
+
2683
+ first_ts, last_ts = _get_timestamp_range_edges(
2684
+ first_ts, last_ts, freq, unit="ns", closed=closed, origin=origin, offset=offset
2685
+ )
2686
+
2687
+ first = (first_ts + int(adjust_first) * freq).to_period(freq)
2688
+ last = (last_ts - int(adjust_last) * freq).to_period(freq)
2689
+ return first, last
2690
+
2691
+
2692
+ def _insert_nat_bin(
2693
+ binner: PeriodIndex, bins: np.ndarray, labels: PeriodIndex, nat_count: int
2694
+ ) -> tuple[PeriodIndex, np.ndarray, PeriodIndex]:
2695
+ # NaT handling as in pandas._lib.lib.generate_bins_dt64()
2696
+ # shift bins by the number of NaT
2697
+ assert nat_count > 0
2698
+ bins += nat_count
2699
+ bins = np.insert(bins, 0, nat_count)
2700
+
2701
+ # Incompatible types in assignment (expression has type "Index", variable
2702
+ # has type "PeriodIndex")
2703
+ binner = binner.insert(0, NaT) # type: ignore[assignment]
2704
+ # Incompatible types in assignment (expression has type "Index", variable
2705
+ # has type "PeriodIndex")
2706
+ labels = labels.insert(0, NaT) # type: ignore[assignment]
2707
+ return binner, bins, labels
2708
+
2709
+
2710
+ def _adjust_dates_anchored(
2711
+ first: Timestamp,
2712
+ last: Timestamp,
2713
+ freq: Tick,
2714
+ closed: Literal["right", "left"] = "right",
2715
+ origin: TimeGrouperOrigin = "start_day",
2716
+ offset: Timedelta | None = None,
2717
+ unit: str = "ns",
2718
+ ) -> tuple[Timestamp, Timestamp]:
2719
+ # First and last offsets should be calculated from the start day to fix an
2720
+ # error caused by resampling across multiple days when a one-day period is
2721
+ # not a multiple of the frequency. See GH 8683.
2722
+ # To handle frequencies that are not a multiple of, or divisible by, a day,
2723
+ # we allow a fixed origin timestamp to be defined. See GH 31809.
2724
+ first = first.as_unit(unit)
2725
+ last = last.as_unit(unit)
2726
+ if offset is not None:
2727
+ offset = offset.as_unit(unit)
2728
+
2729
+ freq_value = Timedelta(freq).as_unit(unit)._value
2730
+
2731
+ origin_timestamp = 0 # origin == "epoch"
2732
+ if origin == "start_day":
2733
+ origin_timestamp = first.normalize()._value
2734
+ elif origin == "start":
2735
+ origin_timestamp = first._value
2736
+ elif isinstance(origin, Timestamp):
2737
+ origin_timestamp = origin.as_unit(unit)._value
2738
+ elif origin in ["end", "end_day"]:
2739
+ origin_last = last if origin == "end" else last.ceil("D")
2740
+ sub_freq_times = (origin_last._value - first._value) // freq_value
2741
+ if closed == "left":
2742
+ sub_freq_times += 1
2743
+ first = origin_last - sub_freq_times * freq
2744
+ origin_timestamp = first._value
2745
+ origin_timestamp += offset._value if offset else 0
2746
+
2747
+ # GH 10117 & GH 19375. If first and last contain timezone information,
2748
+ # Perform the calculation in UTC in order to avoid localizing on an
2749
+ # Ambiguous or Nonexistent time.
2750
+ first_tzinfo = first.tzinfo
2751
+ last_tzinfo = last.tzinfo
2752
+ if first_tzinfo is not None:
2753
+ first = first.tz_convert("UTC")
2754
+ if last_tzinfo is not None:
2755
+ last = last.tz_convert("UTC")
2756
+
2757
+ foffset = (first._value - origin_timestamp) % freq_value
2758
+ loffset = (last._value - origin_timestamp) % freq_value
2759
+
2760
+ if closed == "right":
2761
+ if foffset > 0:
2762
+ # roll back
2763
+ fresult_int = first._value - foffset
2764
+ else:
2765
+ fresult_int = first._value - freq_value
2766
+
2767
+ if loffset > 0:
2768
+ # roll forward
2769
+ lresult_int = last._value + (freq_value - loffset)
2770
+ else:
2771
+ # already the end of the road
2772
+ lresult_int = last._value
2773
+ else: # closed == 'left'
2774
+ if foffset > 0:
2775
+ fresult_int = first._value - foffset
2776
+ else:
2777
+ # start of the road
2778
+ fresult_int = first._value
2779
+
2780
+ if loffset > 0:
2781
+ # roll forward
2782
+ lresult_int = last._value + (freq_value - loffset)
2783
+ else:
2784
+ lresult_int = last._value + freq_value
2785
+ fresult = Timestamp(fresult_int, unit=unit)
2786
+ lresult = Timestamp(lresult_int, unit=unit)
2787
+ if first_tzinfo is not None:
2788
+ fresult = fresult.tz_localize("UTC").tz_convert(first_tzinfo)
2789
+ if last_tzinfo is not None:
2790
+ lresult = lresult.tz_localize("UTC").tz_convert(last_tzinfo)
2791
+ return fresult, lresult
2792
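A worked example of the modulo arithmetic above (assumed values): with freq='15min', origin='start_day' and first=2023-01-01 10:07, the origin is midnight, so foffset = 10h07m % 15min = 7min; with closed='left' the first edge rolls back 7 minutes to 10:00, and a last of 10:52 (loffset = 7min) rolls forward to 11:00. With closed='right', an on-edge first (foffset == 0) is instead pushed back one full freq so that the leftmost bin can own its right edge.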
+
2793
+
2794
+ def asfreq(
2795
+ obj: NDFrameT,
2796
+ freq,
2797
+ method=None,
2798
+ how=None,
2799
+ normalize: bool = False,
2800
+ fill_value=None,
2801
+ ) -> NDFrameT:
2802
+ """
2803
+ Utility frequency conversion method for Series/DataFrame.
2804
+
2805
+ See :meth:`pandas.NDFrame.asfreq` for full documentation.
2806
+ """
2807
+ if isinstance(obj.index, PeriodIndex):
2808
+ if method is not None:
2809
+ raise NotImplementedError("'method' argument is not supported")
2810
+
2811
+ if how is None:
2812
+ how = "E"
2813
+
2814
+ if isinstance(freq, BaseOffset):
2815
+ if hasattr(freq, "_period_dtype_code"):
2816
+ freq = freq_to_period_freqstr(freq.n, freq.name)
2817
+ else:
2818
+ raise ValueError(
2819
+ f"Invalid offset: '{freq.base}' for converting time series "
2820
+ f"with PeriodIndex."
2821
+ )
2822
+
2823
+ new_obj = obj.copy()
2824
+ new_obj.index = obj.index.asfreq(freq, how=how)
2825
+
2826
+ elif len(obj.index) == 0:
2827
+ new_obj = obj.copy()
2828
+
2829
+ new_obj.index = _asfreq_compat(obj.index, freq)
2830
+ else:
2831
+ unit = None
2832
+ if isinstance(obj.index, DatetimeIndex):
2833
+ # TODO: should we disallow non-DatetimeIndex?
2834
+ unit = obj.index.unit
2835
+ dti = date_range(obj.index.min(), obj.index.max(), freq=freq, unit=unit)
2836
+ dti.name = obj.index.name
2837
+ new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
2838
+ if normalize:
2839
+ new_obj.index = new_obj.index.normalize()
2840
+
2841
+ return new_obj
2842
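This module-level helper is what ``Series.asfreq``/``DataFrame.asfreq`` delegate to; a hedged sketch of the equivalent public calls:
>>> import pandas as pd
>>> ser = pd.Series([1.0, 2.0], index=pd.to_datetime(["2023-01-01", "2023-01-03"]))
>>> ser.asfreq("D")                  # reindex onto a daily range; missing days -> NaN
>>> ser.asfreq("D", fill_value=0)    # missing days filled with 0
>>> ser.asfreq("D", method="ffill")  # missing days forward-filled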
+
2843
+
2844
+ def _asfreq_compat(index: DatetimeIndex | PeriodIndex | TimedeltaIndex, freq):
2845
+ """
2846
+ Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex.
2847
+
2848
+ Parameters
2849
+ ----------
2850
+ index : PeriodIndex, DatetimeIndex, or TimedeltaIndex
2851
+ freq : DateOffset
2852
+
2853
+ Returns
2854
+ -------
2855
+ same type as index
2856
+ """
2857
+ if len(index) != 0:
2858
+ # This should never be reached, always checked by the caller
2859
+ raise ValueError(
2860
+ "Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex"
2861
+ )
2862
+ new_index: Index
2863
+ if isinstance(index, PeriodIndex):
2864
+ new_index = index.asfreq(freq=freq)
2865
+ elif isinstance(index, DatetimeIndex):
2866
+ new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name)
2867
+ elif isinstance(index, TimedeltaIndex):
2868
+ new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name)
2869
+ else: # pragma: no cover
2870
+ raise TypeError(type(index))
2871
+ return new_index
2872
+
2873
+
2874
+ def maybe_warn_args_and_kwargs(cls, kernel: str, args, kwargs) -> None:
2875
+ """
2876
+ Warn for deprecation of args and kwargs in resample functions.
2877
+
2878
+ Parameters
2879
+ ----------
2880
+ cls : type
2881
+ Class to warn about.
2882
+ kernel : str
2883
+ Operation name.
2884
+ args : tuple or None
2885
+ args passed by user. Will be None if and only if kernel does not have args.
2886
+ kwargs : dict or None
2887
+ kwargs passed by user. Will be None if and only if kernel does not have kwargs.
2888
+ """
2889
+ warn_args = args is not None and len(args) > 0
2890
+ warn_kwargs = kwargs is not None and len(kwargs) > 0
2891
+ if warn_args and warn_kwargs:
2892
+ msg = "args and kwargs"
2893
+ elif warn_args:
2894
+ msg = "args"
2895
+ elif warn_kwargs:
2896
+ msg = "kwargs"
2897
+ else:
2898
+ return
2899
+ warnings.warn(
2900
+ f"Passing additional {msg} to {cls.__name__}.{kernel} has "
2901
+ "no impact on the result and is deprecated. This will "
2902
+ "raise a TypeError in a future version of pandas.",
2903
+ category=FutureWarning,
2904
+ stacklevel=find_stack_level(),
2905
+ )
2906
+
2907
+
2908
+ def _apply(
2909
+ grouped: GroupBy, how: Callable, *args, include_groups: bool, **kwargs
2910
+ ) -> DataFrame:
2911
+ # GH#7155 - rewrite warning to appear as if it came from `.resample`
2912
+ target_message = "DataFrameGroupBy.apply operated on the grouping columns"
2913
+ new_message = _apply_groupings_depr.format("DataFrameGroupBy", "resample")
2914
+ with rewrite_warning(
2915
+ target_message=target_message,
2916
+ target_category=DeprecationWarning,
2917
+ new_message=new_message,
2918
+ ):
2919
+ result = grouped.apply(how, *args, include_groups=include_groups, **kwargs)
2920
+ return result
venv/lib/python3.10/site-packages/pandas/core/roperator.py ADDED
@@ -0,0 +1,62 @@
1
+ """
2
+ Reversed Operations not available in the stdlib operator module.
3
+ Defining these instead of using lambdas allows us to reference them by name.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import operator
8
+
9
+
10
+ def radd(left, right):
11
+ return right + left
12
+
13
+
14
+ def rsub(left, right):
15
+ return right - left
16
+
17
+
18
+ def rmul(left, right):
19
+ return right * left
20
+
21
+
22
+ def rdiv(left, right):
23
+ return right / left
24
+
25
+
26
+ def rtruediv(left, right):
27
+ return right / left
28
+
29
+
30
+ def rfloordiv(left, right):
31
+ return right // left
32
+
33
+
34
+ def rmod(left, right):
35
+ # check if right is a string as % is the string
36
+ # formatting operation; this is a TypeError
37
+ # otherwise perform the op
38
+ if isinstance(right, str):
39
+ typ = type(left).__name__
40
+ raise TypeError(f"{typ} cannot perform the operation mod")
41
+
42
+ return right % left
43
+
44
+
45
+ def rdivmod(left, right):
46
+ return divmod(right, left)
47
+
48
+
49
+ def rpow(left, right):
50
+ return right**left
51
+
52
+
53
+ def rand_(left, right):
54
+ return operator.and_(right, left)
55
+
56
+
57
+ def ror_(left, right):
58
+ return operator.or_(right, left)
59
+
60
+
61
+ def rxor(left, right):
62
+ return operator.xor(right, left)
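A small sketch of how these reversed operators read: each ``r*`` function receives the arguments swapped relative to the stdlib operator, so the right-hand operand drives the operation.
>>> from pandas.core import roperator
>>> roperator.rsub(2, 5)      # 5 - 2 -> 3
>>> roperator.rtruediv(4, 2)  # 2 / 4 -> 0.5
>>> roperator.rpow(3, 2)      # 2 ** 3 -> 8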