Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_config/config.py +948 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_config/dates.py +25 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_config/display.py +62 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/accessor.py +340 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/algorithms.py +1747 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/api.py +140 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/apply.py +2062 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arraylike.py +530 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__init__.py +43 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py +84 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py +547 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py +207 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/_utils.py +63 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/base.py +2588 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/boolean.py +407 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/categorical.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py +2556 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/datetimes.py +2820 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/floating.py +173 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/integer.py +272 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/interval.py +1917 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/masked.py +1650 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/numeric.py +286 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py +563 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/period.py +1313 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/string_.py +657 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/string_arrow.py +715 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/timedeltas.py +1177 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/base.py +1391 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/common.py +657 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/config_init.py +924 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/construction.py +824 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/flags.py +117 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/frame.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/generic.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__init__.py +15 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.51 kB)

env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc
ADDED
Binary file (26.4 kB)

env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc
ADDED
Binary file (745 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc
ADDED
Binary file (1.39 kB)

env-llmeval/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc
ADDED
Binary file (4.83 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_config/config.py
ADDED
@@ -0,0 +1,948 @@
"""
The config module holds package-wide configurables and provides
a uniform API for working with them.

Overview
========

This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
  validation function associated with them.
- options can be deprecated, in which case referencing them
  should produce a warning.
- deprecated options can optionally be rerouted to a replacement
  so that accessing a deprecated option reroutes to a differently
  named option.
- options can be reset to their default value.
- all option can be reset to their default value at once.
- all options in a certain sub - namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
  is set or reset. Changing the stored value is considered misuse, but
  is not verboten.

Implementation
==============

- Data is stored using nested dictionaries, and should be accessed
  through the provided API.

- "Registered options" and "Deprecated options" have metadata associated
  with them, which are stored in auxiliary dictionaries keyed on the
  fully-qualified key, e.g. "x.y.z.option".

- the config_init module is imported by the package's __init__.py file.
  placing any register_option() calls there will ensure those options
  are available as soon as pandas is loaded. If you use register_option
  in a module, it will only be available after that module is imported,
  which you should be aware of.

- `config_prefix` is a context_manager (for use with the `with` keyword)
  which can save developers some typing, see the docstring.

"""

from __future__ import annotations

from contextlib import (
    ContextDecorator,
    contextmanager,
)
import re
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Generic,
    NamedTuple,
    cast,
)
import warnings

from pandas._typing import (
    F,
    T,
)
from pandas.util._exceptions import find_stack_level

if TYPE_CHECKING:
    from collections.abc import (
        Generator,
        Iterable,
    )


class DeprecatedOption(NamedTuple):
    key: str
    msg: str | None
    rkey: str | None
    removal_ver: str | None


class RegisteredOption(NamedTuple):
    key: str
    defval: object
    doc: str
    validator: Callable[[object], Any] | None
    cb: Callable[[str], Any] | None


# holds deprecated option metadata
_deprecated_options: dict[str, DeprecatedOption] = {}

# holds registered option metadata
_registered_options: dict[str, RegisteredOption] = {}

# holds the current values for registered options
_global_config: dict[str, Any] = {}

# keys which have a special meaning
_reserved_keys: list[str] = ["all"]


class OptionError(AttributeError, KeyError):
    """
    Exception raised for pandas.options.

    Backwards compatible with KeyError checks.

    Examples
    --------
    >>> pd.options.context
    Traceback (most recent call last):
    OptionError: No such option
    """


#
# User API


def _get_single_key(pat: str, silent: bool) -> str:
    keys = _select_options(pat)
    if len(keys) == 0:
        if not silent:
            _warn_if_deprecated(pat)
        raise OptionError(f"No such keys(s): {repr(pat)}")
    if len(keys) > 1:
        raise OptionError("Pattern matched multiple keys")
    key = keys[0]

    if not silent:
        _warn_if_deprecated(key)

    key = _translate_key(key)

    return key


def _get_option(pat: str, silent: bool = False) -> Any:
    key = _get_single_key(pat, silent)

    # walk the nested dict
    root, k = _get_root(key)
    return root[k]


def _set_option(*args, **kwargs) -> None:
    # must at least 1 arg deal with constraints later
    nargs = len(args)
    if not nargs or nargs % 2 != 0:
        raise ValueError("Must provide an even number of non-keyword arguments")

    # default to false
    silent = kwargs.pop("silent", False)

    if kwargs:
        kwarg = next(iter(kwargs.keys()))
        raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"')

    for k, v in zip(args[::2], args[1::2]):
        key = _get_single_key(k, silent)

        o = _get_registered_option(key)
        if o and o.validator:
            o.validator(v)

        # walk the nested dict
        root, k_root = _get_root(key)
        root[k_root] = v

        if o.cb:
            if silent:
                with warnings.catch_warnings(record=True):
                    o.cb(key)
            else:
                o.cb(key)


def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
    keys = _select_options(pat)
    if len(keys) == 0:
        raise OptionError("No such keys(s)")

    s = "\n".join([_build_option_description(k) for k in keys])

    if _print_desc:
        print(s)
        return None
    return s


def _reset_option(pat: str, silent: bool = False) -> None:
    keys = _select_options(pat)

    if len(keys) == 0:
        raise OptionError("No such keys(s)")

    if len(keys) > 1 and len(pat) < 4 and pat != "all":
        raise ValueError(
            "You must specify at least 4 characters when "
            "resetting multiple keys, use the special keyword "
            '"all" to reset all the options to their default value'
        )

    for k in keys:
        _set_option(k, _registered_options[k].defval, silent=silent)


def get_default_val(pat: str):
    key = _get_single_key(pat, silent=True)
    return _get_registered_option(key).defval


class DictWrapper:
    """provide attribute-style access to a nested dict"""

    d: dict[str, Any]

    def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
        object.__setattr__(self, "d", d)
        object.__setattr__(self, "prefix", prefix)

    def __setattr__(self, key: str, val: Any) -> None:
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        # you can't set new keys
        # can you can't overwrite subtrees
        if key in self.d and not isinstance(self.d[key], dict):
            _set_option(prefix, val)
        else:
            raise OptionError("You can only set the value of existing options")

    def __getattr__(self, key: str):
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        try:
            v = object.__getattribute__(self, "d")[key]
        except KeyError as err:
            raise OptionError("No such option") from err
        if isinstance(v, dict):
            return DictWrapper(v, prefix)
        else:
            return _get_option(prefix)

    def __dir__(self) -> list[str]:
        return list(self.d.keys())


# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The doctsrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.


class CallableDynamicDoc(Generic[T]):
    def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
        self.__doc_tmpl__ = doc_tmpl
        self.__func__ = func

    def __call__(self, *args, **kwds) -> T:
        return self.__func__(*args, **kwds)

    # error: Signature of "__doc__" incompatible with supertype "object"
    @property
    def __doc__(self) -> str:  # type: ignore[override]
        opts_desc = _describe_option("all", _print_desc=False)
        opts_list = pp_options_list(list(_registered_options.keys()))
        return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)


_get_option_tmpl = """
get_option(pat)

Retrieves the value of the specified option.

Available options:

{opts_list}

Parameters
----------
pat : str
    Regexp which should match a single option.
    Note: partial matches are supported for convenience, but unless you use the
    full option name (e.g. x.y.z.option_name), your code may break in future
    versions if new options with similar names are introduced.

Returns
-------
result : the value of the option

Raises
------
OptionError : if no such option exists

Notes
-----
Please reference the :ref:`User Guide <options>` for more information.

The available options with its descriptions:

{opts_desc}

Examples
--------
>>> pd.get_option('display.max_columns')  # doctest: +SKIP
4
"""

_set_option_tmpl = """
set_option(pat, value)

Sets the value of the specified option.

Available options:

{opts_list}

Parameters
----------
pat : str
    Regexp which should match a single option.
    Note: partial matches are supported for convenience, but unless you use the
    full option name (e.g. x.y.z.option_name), your code may break in future
    versions if new options with similar names are introduced.
value : object
    New value of option.

Returns
-------
None

Raises
------
OptionError if no such option exists

Notes
-----
Please reference the :ref:`User Guide <options>` for more information.

The available options with its descriptions:

{opts_desc}

Examples
--------
>>> pd.set_option('display.max_columns', 4)
>>> df = pd.DataFrame([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
>>> df
   0  1  ...  3   4
0  1  2  ...  4   5
1  6  7  ...  9  10
[2 rows x 5 columns]
>>> pd.reset_option('display.max_columns')
"""

_describe_option_tmpl = """
describe_option(pat, _print_desc=False)

Prints the description for one or more registered options.

Call with no arguments to get a listing for all registered options.

Available options:

{opts_list}

Parameters
----------
pat : str
    Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
    If True (default) the description(s) will be printed to stdout.
    Otherwise, the description(s) will be returned as a unicode string
    (for testing).

Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False

Notes
-----
Please reference the :ref:`User Guide <options>` for more information.

The available options with its descriptions:

{opts_desc}

Examples
--------
>>> pd.describe_option('display.max_columns')  # doctest: +SKIP
display.max_columns : int
    If max_cols is exceeded, switch to truncate view...
"""

_reset_option_tmpl = """
reset_option(pat)

Reset one or more options to their default value.

Pass "all" as argument to reset all options.

Available options:

{opts_list}

Parameters
----------
pat : str/regex
    If specified only options matching `prefix*` will be reset.
    Note: partial matches are supported for convenience, but unless you
    use the full option name (e.g. x.y.z.option_name), your code may break
    in future versions if new options with similar names are introduced.

Returns
-------
None

Notes
-----
Please reference the :ref:`User Guide <options>` for more information.

The available options with its descriptions:

{opts_desc}

Examples
--------
>>> pd.reset_option('display.max_columns')  # doctest: +SKIP
"""

# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)

#
# Functions for use by pandas developers, in addition to User - api


class option_context(ContextDecorator):
    """
    Context manager to temporarily set options in the `with` statement context.

    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.

    Examples
    --------
    >>> from pandas import option_context
    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
    ...     pass
    """

    def __init__(self, *args) -> None:
        if len(args) % 2 != 0 or len(args) < 2:
            raise ValueError(
                "Need to invoke as option_context(pat, val, [(pat, val), ...])."
            )

        self.ops = list(zip(args[::2], args[1::2]))

    def __enter__(self) -> None:
        self.undo = [(pat, _get_option(pat)) for pat, val in self.ops]

        for pat, val in self.ops:
            _set_option(pat, val, silent=True)

    def __exit__(self, *args) -> None:
        if self.undo:
            for pat, val in self.undo:
                _set_option(pat, val, silent=True)


def register_option(
    key: str,
    defval: object,
    doc: str = "",
    validator: Callable[[object], Any] | None = None,
    cb: Callable[[str], Any] | None = None,
) -> None:
    """
    Register an option in the package-wide pandas config object

    Parameters
    ----------
    key : str
        Fully-qualified key, e.g. "x.y.option - z".
    defval : object
        Default value of the option.
    doc : str
        Description of the option.
    validator : Callable, optional
        Function of a single argument, should raise `ValueError` if
        called with a value which is not a legal value for the option.
    cb
        a function of a single argument "key", which is called
        immediately after an option value is set/reset. key is
        the full name of the option.

    Raises
    ------
    ValueError if `validator` is specified and `defval` is not a valid value.

    """
    import keyword
    import tokenize

    key = key.lower()

    if key in _registered_options:
        raise OptionError(f"Option '{key}' has already been registered")
    if key in _reserved_keys:
        raise OptionError(f"Option '{key}' is a reserved key")

    # the default value should be legal
    if validator:
        validator(defval)

    # walk the nested dict, creating dicts as needed along the path
    path = key.split(".")

    for k in path:
        if not re.match("^" + tokenize.Name + "$", k):
            raise ValueError(f"{k} is not a valid identifier")
        if keyword.iskeyword(k):
            raise ValueError(f"{k} is a python keyword")

    cursor = _global_config
    msg = "Path prefix to option '{option}' is already an option"

    for i, p in enumerate(path[:-1]):
        if not isinstance(cursor, dict):
            raise OptionError(msg.format(option=".".join(path[:i])))
        if p not in cursor:
            cursor[p] = {}
        cursor = cursor[p]

    if not isinstance(cursor, dict):
        raise OptionError(msg.format(option=".".join(path[:-1])))

    cursor[path[-1]] = defval  # initialize

    # save the option metadata
    _registered_options[key] = RegisteredOption(
        key=key, defval=defval, doc=doc, validator=validator, cb=cb
    )


def deprecate_option(
    key: str,
    msg: str | None = None,
    rkey: str | None = None,
    removal_ver: str | None = None,
) -> None:
    """
    Mark option `key` as deprecated, if code attempts to access this option,
    a warning will be produced, using `msg` if given, or a default message
    if not.
    if `rkey` is given, any access to the key will be re-routed to `rkey`.

    Neither the existence of `key` nor that if `rkey` is checked. If they
    do not exist, any subsequence access will fail as usual, after the
    deprecation warning is given.

    Parameters
    ----------
    key : str
        Name of the option to be deprecated.
        must be a fully-qualified option name (e.g "x.y.z.rkey").
    msg : str, optional
        Warning message to output when the key is referenced.
        if no message is given a default message will be emitted.
    rkey : str, optional
        Name of an option to reroute access to.
        If specified, any referenced `key` will be
        re-routed to `rkey` including set/get/reset.
        rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
        used by the default message if no `msg` is specified.
    removal_ver : str, optional
        Specifies the version in which this option will
        be removed. used by the default message if no `msg` is specified.

    Raises
    ------
    OptionError
        If the specified key has already been deprecated.
    """
    key = key.lower()

    if key in _deprecated_options:
        raise OptionError(f"Option '{key}' has already been defined as deprecated.")

    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)


#
# functions internal to the module


def _select_options(pat: str) -> list[str]:
    """
    returns a list of keys matching `pat`

    if pat=="all", returns all registered options
    """
    # short-circuit for exact key
    if pat in _registered_options:
        return [pat]

    # else look through all of them
    keys = sorted(_registered_options.keys())
    if pat == "all":  # reserved key
        return keys

    return [k for k in keys if re.search(pat, k, re.I)]


def _get_root(key: str) -> tuple[dict[str, Any], str]:
    path = key.split(".")
    cursor = _global_config
    for p in path[:-1]:
        cursor = cursor[p]
    return cursor, path[-1]


def _is_deprecated(key: str) -> bool:
    """Returns True if the given option has been deprecated"""
    key = key.lower()
    return key in _deprecated_options


def _get_deprecated_option(key: str):
    """
    Retrieves the metadata for a deprecated option, if `key` is deprecated.

    Returns
    -------
    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
    """
    try:
        d = _deprecated_options[key]
    except KeyError:
        return None
    else:
        return d


def _get_registered_option(key: str):
    """
    Retrieves the option metadata if `key` is a registered option.

    Returns
    -------
    RegisteredOption (namedtuple) if key is deprecated, None otherwise
    """
    return _registered_options.get(key)


def _translate_key(key: str) -> str:
    """
    if key id deprecated and a replacement key defined, will return the
    replacement key, otherwise returns `key` as - is
    """
    d = _get_deprecated_option(key)
    if d:
        return d.rkey or key
    else:
        return key


def _warn_if_deprecated(key: str) -> bool:
    """
    Checks if `key` is a deprecated option and if so, prints a warning.

    Returns
    -------
    bool - True if `key` is deprecated, False otherwise.
    """
    d = _get_deprecated_option(key)
    if d:
        if d.msg:
            warnings.warn(
                d.msg,
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        else:
            msg = f"'{key}' is deprecated"
            if d.removal_ver:
                msg += f" and will be removed in {d.removal_ver}"
            if d.rkey:
                msg += f", please use '{d.rkey}' instead."
            else:
                msg += ", please refrain from using it."

            warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
        return True
    return False


def _build_option_description(k: str) -> str:
    """Builds a formatted description of a registered option and prints it"""
    o = _get_registered_option(k)
    d = _get_deprecated_option(k)

    s = f"{k} "

    if o.doc:
        s += "\n".join(o.doc.strip().split("\n"))
    else:
        s += "No description available."

    if o:
        s += f"\n    [default: {o.defval}] [currently: {_get_option(k, True)}]"

    if d:
        rkey = d.rkey or ""
        s += "\n    (Deprecated"
        s += f", use `{rkey}` instead."
        s += ")"

    return s


def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False):
    """Builds a concise listing of available options, grouped by prefix"""
    from itertools import groupby
    from textwrap import wrap

    def pp(name: str, ks: Iterable[str]) -> list[str]:
        pfx = "- " + name + ".[" if name else ""
        ls = wrap(
            ", ".join(ks),
            width,
            initial_indent=pfx,
            subsequent_indent="  ",
            break_long_words=False,
        )
        if ls and ls[-1] and name:
            ls[-1] = ls[-1] + "]"
        return ls

    ls: list[str] = []
    singles = [x for x in sorted(keys) if x.find(".") < 0]
    if singles:
        ls += pp("", singles)
    keys = [x for x in keys if x.find(".") >= 0]

    for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]):
        ks = [x[len(k) + 1 :] for x in list(g)]
        ls += pp(k, ks)
    s = "\n".join(ls)
    if _print:
        print(s)
    else:
        return s


#
# helpers


@contextmanager
def config_prefix(prefix: str) -> Generator[None, None, None]:
    """
    contextmanager for multiple invocations of API with a common prefix

    supported API functions: (register / get / set )__option

    Warning: This is not thread - safe, and won't work properly if you import
    the API functions into your module using the "from x import y" construct.

    Example
    -------
    import pandas._config.config as cf
    with cf.config_prefix("display.font"):
        cf.register_option("color", "red")
        cf.register_option("size", " 5 pt")
        cf.set_option(size, " 6 pt")
        cf.get_option(size)
        ...

    etc'

    will register options "display.font.color", "display.font.size", set the
    value of "display.font.size"... and so on.
    """
    # Note: reset_option relies on set_option, and on key directly
    # it does not fit in to this monkey-patching scheme

    global register_option, get_option, set_option

    def wrap(func: F) -> F:
        def inner(key: str, *args, **kwds):
            pkey = f"{prefix}.{key}"
            return func(pkey, *args, **kwds)

        return cast(F, inner)

    _register_option = register_option
    _get_option = get_option
    _set_option = set_option
    set_option = wrap(set_option)
    get_option = wrap(get_option)
    register_option = wrap(register_option)
    try:
        yield
    finally:
        set_option = _set_option
        get_option = _get_option
        register_option = _register_option


# These factories and methods are handy for use as the validator
# arg in register_option


def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:
    """

    Parameters
    ----------
    `_type` - a type to be compared against (e.g. type(x) == `_type`)

    Returns
    -------
    validator - a function of a single argument x , which raises
                ValueError if type(x) is not equal to `_type`

    """

    def inner(x) -> None:
        if type(x) != _type:
            raise ValueError(f"Value must have type '{_type}'")

    return inner


def is_instance_factory(_type) -> Callable[[Any], None]:
    """

    Parameters
    ----------
    `_type` - the type to be checked against

    Returns
    -------
    validator - a function of a single argument x , which raises
                ValueError if x is not an instance of `_type`

    """
    if isinstance(_type, (tuple, list)):
        _type = tuple(_type)
        type_repr = "|".join(map(str, _type))
    else:
        type_repr = f"'{_type}'"

    def inner(x) -> None:
        if not isinstance(x, _type):
            raise ValueError(f"Value must be an instance of {type_repr}")

    return inner


def is_one_of_factory(legal_values) -> Callable[[Any], None]:
    callables = [c for c in legal_values if callable(c)]
    legal_values = [c for c in legal_values if not callable(c)]

    def inner(x) -> None:
        if x not in legal_values:
            if not any(c(x) for c in callables):
                uvals = [str(lval) for lval in legal_values]
                pp_values = "|".join(uvals)
                msg = f"Value must be one of {pp_values}"
                if len(callables):
                    msg += " or a callable"
                raise ValueError(msg)

    return inner


def is_nonnegative_int(value: object) -> None:
    """
    Verify that value is None or a positive int.

    Parameters
    ----------
    value : None or int
        The `value` to be checked.

    Raises
    ------
    ValueError
        When the value is not None or is a negative integer
    """
    if value is None:
        return

    elif isinstance(value, int):
        if value >= 0:
            return

    msg = "Value must be a nonnegative integer or None"
    raise ValueError(msg)


# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_text = is_instance_factory((str, bytes))


def is_callable(obj) -> bool:
    """

    Parameters
    ----------
    `obj` - the object to be checked

    Returns
    -------
    validator - returns True if object is callable
        raises ValueError otherwise.

    """
    if not callable(obj):
        raise ValueError("Value must be a callable")
    return True
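
The module above is what backs the public options interface. As a quick orientation, here is a minimal sketch of how these wrappers are used through the top-level pandas namespace (pd.get_option, pd.set_option, pd.reset_option, and pd.option_context are the exported names); the snippet is illustrative and not part of this diff:

import pandas as pd

# get_option/set_option accept any regex that uniquely matches one key;
# using the full key avoids ambiguity if similar options are added later.
pd.set_option("display.max_rows", 100)
assert pd.get_option("display.max_rows") == 100

# option_context is a ContextDecorator: prior values are restored on exit.
with pd.option_context("display.max_rows", 5, "display.max_columns", 2):
    assert pd.get_option("display.max_rows") == 5
assert pd.get_option("display.max_rows") == 100

pd.reset_option("display.max_rows")  # back to the registered default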
env-llmeval/lib/python3.10/site-packages/pandas/_config/dates.py
ADDED
@@ -0,0 +1,25 @@
"""
config for datetime formatting
"""
from __future__ import annotations

from pandas._config import config as cf

pc_date_dayfirst_doc = """
: boolean
    When True, prints and parses dates with the day first, eg 20/01/2005
"""

pc_date_yearfirst_doc = """
: boolean
    When True, prints and parses dates with the year first, eg 2005/01/20
"""

with cf.config_prefix("display"):
    # Needed upstream of `_libs` because these are used in tslibs.parsing
    cf.register_option(
        "date_dayfirst", False, pc_date_dayfirst_doc, validator=cf.is_bool
    )
    cf.register_option(
        "date_yearfirst", False, pc_date_yearfirst_doc, validator=cf.is_bool
    )
env-llmeval/lib/python3.10/site-packages/pandas/_config/display.py
ADDED
@@ -0,0 +1,62 @@
"""
Unopinionated display configuration.
"""

from __future__ import annotations

import locale
import sys

from pandas._config import config as cf

# -----------------------------------------------------------------------------
# Global formatting options
_initial_defencoding: str | None = None


def detect_console_encoding() -> str:
    """
    Try to find the most capable encoding supported by the console.
    slightly modified from the way IPython handles the same issue.
    """
    global _initial_defencoding

    encoding = None
    try:
        encoding = sys.stdout.encoding or sys.stdin.encoding
    except (AttributeError, OSError):
        pass

    # try again for something better
    if not encoding or "ascii" in encoding.lower():
        try:
            encoding = locale.getpreferredencoding()
        except locale.Error:
            # can be raised by locale.setlocale(), which is
            # called by getpreferredencoding
            # (on some systems, see stdlib locale docs)
            pass

    # when all else fails. this will usually be "ascii"
    if not encoding or "ascii" in encoding.lower():
        encoding = sys.getdefaultencoding()

    # GH#3360, save the reported defencoding at import time
    # MPL backends may change it. Make available for debugging.
    if not _initial_defencoding:
        _initial_defencoding = sys.getdefaultencoding()

    return encoding


pc_encoding_doc = """
: str/unicode
    Defaults to the detected encoding of the console.
    Specifies the encoding to be used for strings returned by to_string,
    these are generally strings meant to be displayed on the console.
"""

with cf.config_prefix("display"):
    cf.register_option(
        "encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text
    )
env-llmeval/lib/python3.10/site-packages/pandas/core/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/core/accessor.py
ADDED
@@ -0,0 +1,340 @@
"""

accessor.py contains base classes for implementing accessor properties
that can be mixed into or pinned onto other pandas classes.

"""
from __future__ import annotations

from typing import (
    Callable,
    final,
)
import warnings

from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level


class DirNamesMixin:
    _accessors: set[str] = set()
    _hidden_attrs: frozenset[str] = frozenset()

    @final
    def _dir_deletions(self) -> set[str]:
        """
        Delete unwanted __dir__ for this object.
        """
        return self._accessors | self._hidden_attrs

    def _dir_additions(self) -> set[str]:
        """
        Add additional __dir__ for this object.
        """
        return {accessor for accessor in self._accessors if hasattr(self, accessor)}

    def __dir__(self) -> list[str]:
        """
        Provide method name lookup and completion.

        Notes
        -----
        Only provide 'public' methods.
        """
        rv = set(super().__dir__())
        rv = (rv - self._dir_deletions()) | self._dir_additions()
        return sorted(rv)


class PandasDelegate:
    """
    Abstract base class for delegating methods/properties.
    """

    def _delegate_property_get(self, name: str, *args, **kwargs):
        raise TypeError(f"You cannot access the property {name}")

    def _delegate_property_set(self, name: str, value, *args, **kwargs):
        raise TypeError(f"The property {name} cannot be set")

    def _delegate_method(self, name: str, *args, **kwargs):
        raise TypeError(f"You cannot call method {name}")

    @classmethod
    def _add_delegate_accessors(
        cls,
        delegate,
        accessors: list[str],
        typ: str,
        overwrite: bool = False,
        accessor_mapping: Callable[[str], str] = lambda x: x,
        raise_on_missing: bool = True,
    ) -> None:
        """
        Add accessors to cls from the delegate class.

        Parameters
        ----------
        cls
            Class to add the methods/properties to.
        delegate
            Class to get methods/properties and doc-strings.
        accessors : list of str
            List of accessors to add.
        typ : {'property', 'method'}
        overwrite : bool, default False
            Overwrite the method/property in the target class if it exists.
        accessor_mapping: Callable, default lambda x: x
            Callable to map the delegate's function to the cls' function.
        raise_on_missing: bool, default True
            Raise if an accessor does not exist on delegate.
            False skips the missing accessor.
        """

        def _create_delegator_property(name: str):
            def _getter(self):
                return self._delegate_property_get(name)

            def _setter(self, new_values):
                return self._delegate_property_set(name, new_values)

            _getter.__name__ = name
            _setter.__name__ = name

            return property(
                fget=_getter,
                fset=_setter,
                doc=getattr(delegate, accessor_mapping(name)).__doc__,
            )

        def _create_delegator_method(name: str):
            def f(self, *args, **kwargs):
                return self._delegate_method(name, *args, **kwargs)

            f.__name__ = name
            f.__doc__ = getattr(delegate, accessor_mapping(name)).__doc__

            return f

        for name in accessors:
            if (
                not raise_on_missing
                and getattr(delegate, accessor_mapping(name), None) is None
            ):
                continue

            if typ == "property":
                f = _create_delegator_property(name)
            else:
                f = _create_delegator_method(name)

            # don't overwrite existing methods/properties
            if overwrite or not hasattr(cls, name):
                setattr(cls, name, f)


def delegate_names(
    delegate,
    accessors: list[str],
    typ: str,
    overwrite: bool = False,
    accessor_mapping: Callable[[str], str] = lambda x: x,
    raise_on_missing: bool = True,
):
    """
    Add delegated names to a class using a class decorator. This provides
    an alternative usage to directly calling `_add_delegate_accessors`
    below a class definition.

    Parameters
    ----------
    delegate : object
        The class to get methods/properties & doc-strings.
    accessors : Sequence[str]
        List of accessor to add.
    typ : {'property', 'method'}
    overwrite : bool, default False
        Overwrite the method/property in the target class if it exists.
    accessor_mapping: Callable, default lambda x: x
        Callable to map the delegate's function to the cls' function.
    raise_on_missing: bool, default True
        Raise if an accessor does not exist on delegate.
        False skips the missing accessor.

    Returns
    -------
    callable
        A class decorator.

    Examples
    --------
    @delegate_names(Categorical, ["categories", "ordered"], "property")
    class CategoricalAccessor(PandasDelegate):
        [...]
    """

    def add_delegate_accessors(cls):
        cls._add_delegate_accessors(
            delegate,
            accessors,
            typ,
            overwrite=overwrite,
            accessor_mapping=accessor_mapping,
            raise_on_missing=raise_on_missing,
        )
        return cls

    return add_delegate_accessors


# Ported with modifications from xarray; licence at LICENSES/XARRAY_LICENSE
# https://github.com/pydata/xarray/blob/master/xarray/core/extensions.py
# 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors
# 2. We use a UserWarning instead of a custom Warning


class CachedAccessor:
    """
    Custom property-like object.

    A descriptor for caching accessors.

    Parameters
    ----------
    name : str
        Namespace that will be accessed under, e.g. ``df.foo``.
    accessor : cls
        Class with the extension methods.

    Notes
    -----
    For accessor, The class's __init__ method assumes that one of
    ``Series``, ``DataFrame`` or ``Index`` as the
    single argument ``data``.
    """

    def __init__(self, name: str, accessor) -> None:
        self._name = name
        self._accessor = accessor

    def __get__(self, obj, cls):
        if obj is None:
            # we're accessing the attribute of the class, i.e., Dataset.geo
            return self._accessor
        accessor_obj = self._accessor(obj)
        # Replace the property with the accessor object. Inspired by:
        # https://www.pydanny.com/cached-property.html
        # We need to use object.__setattr__ because we overwrite __setattr__ on
        # NDFrame
        object.__setattr__(obj, self._name, accessor_obj)
        return accessor_obj


@doc(klass="", others="")
def _register_accessor(name: str, cls):
    """
    Register a custom accessor on {klass} objects.

    Parameters
    ----------
    name : str
        Name under which the accessor should be registered. A warning is issued
        if this name conflicts with a preexisting attribute.

    Returns
    -------
    callable
        A class decorator.

    See Also
    --------
    register_dataframe_accessor : Register a custom accessor on DataFrame objects.
    register_series_accessor : Register a custom accessor on Series objects.
    register_index_accessor : Register a custom accessor on Index objects.

    Notes
    -----
    When accessed, your accessor will be initialized with the pandas object
    the user is interacting with. So the signature must be

    .. code-block:: python

        def __init__(self, pandas_object):  # noqa: E999
            ...

    For consistency with pandas methods, you should raise an ``AttributeError``
    if the data passed to your accessor has an incorrect dtype.

    >>> pd.Series(['a', 'b']).dt
    Traceback (most recent call last):
    ...
    AttributeError: Can only use .dt accessor with datetimelike values

    Examples
    --------
    In your library code::

        import pandas as pd

        @pd.api.extensions.register_dataframe_accessor("geo")
        class GeoAccessor:
            def __init__(self, pandas_obj):
                self._obj = pandas_obj

            @property
            def center(self):
                # return the geographic center point of this DataFrame
                lat = self._obj.latitude
                lon = self._obj.longitude
                return (float(lon.mean()), float(lat.mean()))

            def plot(self):
                # plot this array's data on a map, e.g., using Cartopy
                pass

    Back in an interactive IPython session:

    .. code-block:: ipython

        In [1]: ds = pd.DataFrame({{"longitude": np.linspace(0, 10),
           ...:                     "latitude": np.linspace(0, 20)}})
        In [2]: ds.geo.center
        Out[2]: (5.0, 10.0)
        In [3]: ds.geo.plot()  # plots data on a map
    """

    def decorator(accessor):
        if hasattr(cls, name):
            warnings.warn(
                f"registration of accessor {repr(accessor)} under name "
                f"{repr(name)} for type {repr(cls)} is overriding a preexisting "
                f"attribute with the same name.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
        setattr(cls, name, CachedAccessor(name, accessor))
        cls._accessors.add(name)
        return accessor

    return decorator


@doc(_register_accessor, klass="DataFrame")
def register_dataframe_accessor(name: str):
    from pandas import DataFrame

    return _register_accessor(name, DataFrame)


@doc(_register_accessor, klass="Series")
def register_series_accessor(name: str):
    from pandas import Series

    return _register_accessor(name, Series)


@doc(_register_accessor, klass="Index")
def register_index_accessor(name: str):
    from pandas import Index

    return _register_accessor(name, Index)
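
The registration decorators above are exposed publicly under pandas.api.extensions. For reference, a self-contained, runnable version of the GeoAccessor pattern that the docstring describes (illustrative; not part of this diff):

import numpy as np
import pandas as pd


@pd.api.extensions.register_dataframe_accessor("geo")
class GeoAccessor:
    def __init__(self, pandas_obj):
        # CachedAccessor instantiates this once per DataFrame, then caches it
        self._obj = pandas_obj

    @property
    def center(self):
        # geographic center point of this DataFrame
        lat = self._obj.latitude
        lon = self._obj.longitude
        return (float(lon.mean()), float(lat.mean()))


df = pd.DataFrame({"longitude": np.linspace(0, 10), "latitude": np.linspace(0, 20)})
print(df.geo.center)  # (5.0, 10.0)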
env-llmeval/lib/python3.10/site-packages/pandas/core/algorithms.py
ADDED
@@ -0,0 +1,1747 @@
"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import annotations

import decimal
import operator
from textwrap import dedent
from typing import (
    TYPE_CHECKING,
    Literal,
    cast,
)
import warnings

import numpy as np

from pandas._libs import (
    algos,
    hashtable as htable,
    iNaT,
    lib,
)
from pandas._typing import (
    AnyArrayLike,
    ArrayLike,
    AxisInt,
    DtypeObj,
    TakeIndexer,
    npt,
)
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import (
    construct_1d_object_array_from_listlike,
    np_find_common_type,
)
from pandas.core.dtypes.common import (
    ensure_float64,
    ensure_object,
    ensure_platform_int,
    is_array_like,
    is_bool_dtype,
    is_complex_dtype,
    is_dict_like,
    is_extension_array_dtype,
    is_float_dtype,
    is_integer,
    is_integer_dtype,
    is_list_like,
    is_object_dtype,
    is_signed_integer_dtype,
    needs_i8_conversion,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import (
    BaseMaskedDtype,
    CategoricalDtype,
    ExtensionDtype,
    NumpyEADtype,
)
from pandas.core.dtypes.generic import (
    ABCDatetimeArray,
    ABCExtensionArray,
    ABCIndex,
    ABCMultiIndex,
    ABCSeries,
    ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
    isna,
    na_value_for_dtype,
)

from pandas.core.array_algos.take import take_nd
from pandas.core.construction import (
    array as pd_array,
    ensure_wrapped_if_datetimelike,
    extract_array,
)
from pandas.core.indexers import validate_indices

if TYPE_CHECKING:
    from pandas._typing import (
        ListLike,
        NumpySorter,
        NumpyValueArrayLike,
    )

    from pandas import (
        Categorical,
        Index,
        Series,
    )
    from pandas.core.arrays import (
        BaseMaskedArray,
        ExtensionArray,
    )


# --------------- #
# dtype access    #
# --------------- #
def _ensure_data(values: ArrayLike) -> np.ndarray:
    """
    routine to ensure that our data is of the correct
    input dtype for lower-level routines

    This will coerce:
    - ints -> int64
    - uint -> uint64
    - bool -> uint8
    - datetimelike -> i8
    - datetime64tz -> i8 (in local tz)
    - categorical -> codes

    Parameters
    ----------
    values : np.ndarray or ExtensionArray

    Returns
    -------
    np.ndarray
    """

    if not isinstance(values, ABCMultiIndex):
        # extract_array would raise
        values = extract_array(values, extract_numpy=True)

    if is_object_dtype(values.dtype):
        return ensure_object(np.asarray(values))

    elif isinstance(values.dtype, BaseMaskedDtype):
        # i.e. BooleanArray, FloatingArray, IntegerArray
        values = cast("BaseMaskedArray", values)
        if not values._hasna:
            # No pd.NAs -> We can avoid an object-dtype cast (and copy) GH#41816
            # recurse to avoid re-implementing logic for e.g. bool->uint8
            return _ensure_data(values._data)
        return np.asarray(values)

    elif isinstance(values.dtype, CategoricalDtype):
        # NB: cases that go through here should NOT be using _reconstruct_data
        # on the back-end.
        values = cast("Categorical", values)
        return values.codes

    elif is_bool_dtype(values.dtype):
        if isinstance(values, np.ndarray):
            # i.e. actually dtype == np.dtype("bool")
            return np.asarray(values).view("uint8")
        else:
            # e.g. Sparse[bool, False]  # TODO: no test cases get here
            return np.asarray(values).astype("uint8", copy=False)

    elif is_integer_dtype(values.dtype):
        return np.asarray(values)

    elif is_float_dtype(values.dtype):
        # Note: checking `values.dtype == "float128"` raises on Windows and 32bit
        # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]"
        # has no attribute "itemsize"
        if values.dtype.itemsize in [2, 12, 16]:  # type: ignore[union-attr]
            # we don't (yet) have float128 hashtable support
            return ensure_float64(values)
        return np.asarray(values)

    elif is_complex_dtype(values.dtype):
        return cast(np.ndarray, values)

    # datetimelike
    elif needs_i8_conversion(values.dtype):
        npvalues = values.view("i8")
        npvalues = cast(np.ndarray, npvalues)
        return npvalues

    # we have failed, return object
    values = np.asarray(values, dtype=object)
    return ensure_object(values)
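# A minimal sketch of the coercions above (illustrative, not part of the
# pandas source): bool ndarrays are viewed as uint8 and datetimelike values
# as their int64 representation.
#
#   >>> import numpy as np
#   >>> _ensure_data(np.array([True, False])).dtype
#   dtype('uint8')
#   >>> _ensure_data(np.array(["2016-01-01"], dtype="datetime64[ns]")).dtype
#   dtype('int64')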
def _reconstruct_data(
    values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
) -> ArrayLike:
    """
    reverse of _ensure_data

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
    dtype : np.dtype or ExtensionDtype
    original : AnyArrayLike

    Returns
    -------
    ExtensionArray or np.ndarray
    """
    if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
        # Catch DatetimeArray/TimedeltaArray
        return values

    if not isinstance(dtype, np.dtype):
        # i.e. ExtensionDtype; note we have ruled out above the possibility
        # that values.dtype == dtype
        cls = dtype.construct_array_type()

        values = cls._from_sequence(values, dtype=dtype)

    else:
        values = values.astype(dtype, copy=False)

    return values


def _ensure_arraylike(values, func_name: str) -> ArrayLike:
    """
    ensure that we are arraylike if not already
    """
    if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
        # GH#52986
        if func_name != "isin-targets":
            # Make an exception for the comps argument in isin.
            warnings.warn(
                f"{func_name} with argument that is not a Series, Index, "
                "ExtensionArray, or np.ndarray is deprecated and will raise in a "
                "future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        inferred = lib.infer_dtype(values, skipna=False)
        if inferred in ["mixed", "string", "mixed-integer"]:
            # "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160
            if isinstance(values, tuple):
                values = list(values)
            values = construct_1d_object_array_from_listlike(values)
        else:
            values = np.asarray(values)
    return values


_hashtables = {
    "complex128": htable.Complex128HashTable,
    "complex64": htable.Complex64HashTable,
    "float64": htable.Float64HashTable,
    "float32": htable.Float32HashTable,
    "uint64": htable.UInt64HashTable,
    "uint32": htable.UInt32HashTable,
    "uint16": htable.UInt16HashTable,
    "uint8": htable.UInt8HashTable,
    "int64": htable.Int64HashTable,
    "int32": htable.Int32HashTable,
    "int16": htable.Int16HashTable,
    "int8": htable.Int8HashTable,
    "string": htable.StringHashTable,
    "object": htable.PyObjectHashTable,
}


def _get_hashtable_algo(values: np.ndarray):
    """
    Parameters
    ----------
    values : np.ndarray

    Returns
    -------
    htable : HashTable subclass
    values : ndarray
    """
    values = _ensure_data(values)

    ndtype = _check_object_for_strings(values)
    hashtable = _hashtables[ndtype]
    return hashtable, values


def _check_object_for_strings(values: np.ndarray) -> str:
    """
    Check if we can use string hashtable instead of object hashtable.

    Parameters
    ----------
    values : ndarray

    Returns
    -------
    str
    """
    ndtype = values.dtype.name
    if ndtype == "object":
        # it's cheaper to use a String Hash Table than Object; we infer
        # including nulls because that is the only difference between
        # StringHashTable and ObjectHashtable
        if lib.is_string_array(values, skipna=False):
            ndtype = "string"
    return ndtype
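# Illustrative sketch (not part of the pandas source): object arrays that
# contain only strings get the cheaper StringHashTable.
#
#   >>> import numpy as np
#   >>> _check_object_for_strings(np.array(["a", "b"], dtype=object))
#   'string'
#   >>> _check_object_for_strings(np.array(["a", 1], dtype=object))
#   'object'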
# --------------- #
# top-level algos #
# --------------- #


def unique(values):
    """
    Return unique values based on a hash table.

    Uniques are returned in order of appearance. This does NOT sort.

    Significantly faster than numpy.unique for long enough sequences.
    Includes NA values.

    Parameters
    ----------
    values : 1d array-like

    Returns
    -------
    numpy.ndarray or ExtensionArray

        The return can be:

        * Index : when the input is an Index
        * Categorical : when the input is a Categorical dtype
        * ndarray : when the input is a Series/ndarray

        Return numpy.ndarray or ExtensionArray.

    See Also
    --------
    Index.unique : Return unique values from an Index.
    Series.unique : Return unique values of Series object.

    Examples
    --------
    >>> pd.unique(pd.Series([2, 1, 3, 3]))
    array([2, 1, 3])

    >>> pd.unique(pd.Series([2] + [1] * 5))
    array([2, 1])

    >>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
    array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')

    >>> pd.unique(
    ...     pd.Series(
    ...         [
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...         ]
    ...     )
    ... )
    <DatetimeArray>
    ['2016-01-01 00:00:00-05:00']
    Length: 1, dtype: datetime64[ns, US/Eastern]

    >>> pd.unique(
    ...     pd.Index(
    ...         [
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...             pd.Timestamp("20160101", tz="US/Eastern"),
    ...         ]
    ...     )
    ... )
    DatetimeIndex(['2016-01-01 00:00:00-05:00'],
                  dtype='datetime64[ns, US/Eastern]',
                  freq=None)

    >>> pd.unique(np.array(list("baabc"), dtype="O"))
    array(['b', 'a', 'c'], dtype=object)

    An unordered Categorical will return categories in the
    order of appearance.

    >>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
    ['b', 'a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    >>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
    ['b', 'a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    An ordered Categorical preserves the category ordering.

    >>> pd.unique(
    ...     pd.Series(
    ...         pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
    ...     )
    ... )
    ['b', 'a', 'c']
    Categories (3, object): ['a' < 'b' < 'c']

    An array of tuples

    >>> pd.unique(pd.Series([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]).values)
    array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
    """
    return unique_with_mask(values)


def nunique_ints(values: ArrayLike) -> int:
    """
    Return the number of unique values for integer array-likes.

    Significantly faster than pandas.unique for long enough sequences.
    No checks are done to ensure input is integral.

    Parameters
    ----------
    values : 1d array-like

    Returns
    -------
    int : The number of unique values in ``values``
    """
    if len(values) == 0:
        return 0
    values = _ensure_data(values)
    # bincount requires intp
    result = (np.bincount(values.ravel().astype("intp")) != 0).sum()
    return result


def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None = None):
    """See algorithms.unique for docs. Takes a mask for masked arrays."""
    values = _ensure_arraylike(values, func_name="unique")

    if isinstance(values.dtype, ExtensionDtype):
        # Dispatch to extension dtype's unique.
        return values.unique()

    original = values
    hashtable, values = _get_hashtable_algo(values)

    table = hashtable(len(values))
    if mask is None:
        uniques = table.unique(values)
        uniques = _reconstruct_data(uniques, original.dtype, original)
        return uniques

    else:
        uniques, mask = table.unique(values, mask=mask)
        uniques = _reconstruct_data(uniques, original.dtype, original)
        assert mask is not None  # for mypy
        return uniques, mask.astype("bool")


unique1d = unique
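# Illustrative sketch (not part of the pandas source): nunique_ints counts
# distinct values via np.bincount, so inputs must already be integral.
#
#   >>> import numpy as np
#   >>> nunique_ints(np.array([1, 1, 2, 5]))
#   3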
_MINIMUM_COMP_ARR_LEN = 1_000_000


def isin(comps: ListLike, values: ListLike) -> npt.NDArray[np.bool_]:
    """
    Compute the isin boolean array.

    Parameters
    ----------
    comps : list-like
    values : list-like

    Returns
    -------
    ndarray[bool]
        Same length as `comps`.
    """
    if not is_list_like(comps):
        raise TypeError(
            "only list-like objects are allowed to be passed "
            f"to isin(), you passed a `{type(comps).__name__}`"
        )
    if not is_list_like(values):
        raise TypeError(
            "only list-like objects are allowed to be passed "
            f"to isin(), you passed a `{type(values).__name__}`"
        )

    if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
        orig_values = list(values)
        values = _ensure_arraylike(orig_values, func_name="isin-targets")

        if (
            len(values) > 0
            and values.dtype.kind in "iufcb"
            and not is_signed_integer_dtype(comps)
        ):
            # GH#46485 Use object to avoid upcast to float64 later
            # TODO: Share with _find_common_type_compat
            values = construct_1d_object_array_from_listlike(orig_values)

    elif isinstance(values, ABCMultiIndex):
        # Avoid raising in extract_array
        values = np.array(values)
    else:
        values = extract_array(values, extract_numpy=True, extract_range=True)

    comps_array = _ensure_arraylike(comps, func_name="isin")
    comps_array = extract_array(comps_array, extract_numpy=True)
    if not isinstance(comps_array, np.ndarray):
        # i.e. Extension Array
        return comps_array.isin(values)

    elif needs_i8_conversion(comps_array.dtype):
        # Dispatch to DatetimeLikeArrayMixin.isin
        return pd_array(comps_array).isin(values)
    elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps_array.dtype):
        # e.g. comps_array are integers and values are datetime64s
        return np.zeros(comps_array.shape, dtype=bool)
        # TODO: not quite right ... Sparse/Categorical
    elif needs_i8_conversion(values.dtype):
        return isin(comps_array, values.astype(object))

    elif isinstance(values.dtype, ExtensionDtype):
        return isin(np.asarray(comps_array), np.asarray(values))

    # GH16012
    # Ensure np.isin doesn't get object types or it *may* throw an exception
    # Although a hashmap has O(1) look-up (vs. O(log n) in a sorted array),
    # np.isin is faster for small sizes
    if (
        len(comps_array) > _MINIMUM_COMP_ARR_LEN
        and len(values) <= 26
        and comps_array.dtype != object
    ):
        # If the values include nan we need to check for nan explicitly
        # since np.nan is not equal to np.nan
        if isna(values).any():

            def f(c, v):
                return np.logical_or(np.isin(c, v).ravel(), np.isnan(c))

        else:
            f = lambda a, b: np.isin(a, b).ravel()

    else:
        common = np_find_common_type(values.dtype, comps_array.dtype)
        values = values.astype(common, copy=False)
        comps_array = comps_array.astype(common, copy=False)
        f = htable.ismember

    return f(comps_array, values)
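# Illustrative sketch (not part of the pandas source): for arrays shorter
# than _MINIMUM_COMP_ARR_LEN the hashtable path is taken, but the resulting
# boolean mask is the same either way.
#
#   >>> import numpy as np
#   >>> isin(np.array([1, 2, 3]), np.array([2, 3]))
#   array([False,  True,  True])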
def factorize_array(
    values: np.ndarray,
    use_na_sentinel: bool = True,
    size_hint: int | None = None,
    na_value: object = None,
    mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[npt.NDArray[np.intp], np.ndarray]:
    """
    Factorize a numpy array to codes and uniques.

    This doesn't do any coercion of types or unboxing before factorization.

    Parameters
    ----------
    values : ndarray
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.
    size_hint : int, optional
        Passed through to the hashtable's 'get_labels' method
    na_value : object, optional
        A value in `values` to consider missing. Note: only use this
        parameter when you know that you don't have any values pandas would
        consider missing in the array (NaN for float data, iNaT for
        datetimes, etc.).
    mask : ndarray[bool], optional
        If not None, the mask is used as indicator for missing values
        (True = missing, False = valid) instead of `na_value` or
        condition "val != val".

    Returns
    -------
    codes : ndarray[np.intp]
    uniques : ndarray
    """
    original = values
    if values.dtype.kind in "mM":
        # _get_hashtable_algo will cast dt64/td64 to i8 via _ensure_data, so we
        # need to do the same to na_value. We are assuming here that the passed
        # na_value is an appropriately-typed NaT.
        # e.g. test_where_datetimelike_categorical
        na_value = iNaT

    hash_klass, values = _get_hashtable_algo(values)

    table = hash_klass(size_hint or len(values))
    uniques, codes = table.factorize(
        values,
        na_sentinel=-1,
        na_value=na_value,
        mask=mask,
        ignore_na=use_na_sentinel,
    )

    # re-cast e.g. i8->dt64/td64, uint8->bool
    uniques = _reconstruct_data(uniques, original.dtype, original)

    codes = ensure_platform_int(codes)
    return codes, uniques
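# Illustrative sketch (not part of the pandas source): uniques come back in
# order of appearance, with codes indexing into them.
#
#   >>> import numpy as np
#   >>> codes, uniques = factorize_array(np.array([3, 1, 3, 2]))
#   >>> codes
#   array([0, 1, 0, 2])
#   >>> uniques
#   array([3, 1, 2])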
@doc(
    values=dedent(
        """\
    values : sequence
        A 1-D sequence. Sequences that aren't pandas objects are
        coerced to ndarrays before factorization.
    """
    ),
    sort=dedent(
        """\
    sort : bool, default False
        Sort `uniques` and shuffle `codes` to maintain the
        relationship.
    """
    ),
    size_hint=dedent(
        """\
    size_hint : int, optional
        Hint to the hashtable sizer.
    """
    ),
)
def factorize(
    values,
    sort: bool = False,
    use_na_sentinel: bool = True,
    size_hint: int | None = None,
) -> tuple[np.ndarray, np.ndarray | Index]:
    """
    Encode the object as an enumerated type or categorical variable.

    This method is useful for obtaining a numeric representation of an
    array when all that matters is identifying distinct values. `factorize`
    is available as both a top-level function :func:`pandas.factorize`,
    and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.

    Parameters
    ----------
    {values}{sort}
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.

        .. versionadded:: 1.5.0
    {size_hint}\

    Returns
    -------
    codes : ndarray
        An integer ndarray that's an indexer into `uniques`.
        ``uniques.take(codes)`` will have the same values as `values`.
    uniques : ndarray, Index, or Categorical
        The unique valid values. When `values` is Categorical, `uniques`
        is a Categorical. When `values` is some other pandas object, an
        `Index` is returned. Otherwise, a 1-D ndarray is returned.

        .. note::

            Even if there's a missing value in `values`, `uniques` will
            *not* contain an entry for it.

    See Also
    --------
    cut : Discretize continuous-valued array.
    unique : Find the unique values in an array.

    Notes
    -----
    Reference :ref:`the user guide <reshaping.factorize>` for more examples.

    Examples
    --------
    These examples all show factorize as a top-level method like
    ``pd.factorize(values)``. The results are identical for methods like
    :meth:`Series.factorize`.

    >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"))
    >>> codes
    array([0, 0, 1, 2, 0])
    >>> uniques
    array(['b', 'a', 'c'], dtype=object)

    With ``sort=True``, the `uniques` will be sorted, and `codes` will be
    shuffled so that the relationship is maintained.

    >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"),
    ...                               sort=True)
    >>> codes
    array([1, 1, 0, 2, 1])
    >>> uniques
    array(['a', 'b', 'c'], dtype=object)

    When ``use_na_sentinel=True`` (the default), missing values are indicated in
    the `codes` with the sentinel value ``-1`` and missing values are not
    included in `uniques`.

    >>> codes, uniques = pd.factorize(np.array(['b', None, 'a', 'c', 'b'], dtype="O"))
    >>> codes
    array([ 0, -1,  1,  2,  0])
    >>> uniques
    array(['b', 'a', 'c'], dtype=object)

    Thus far, we've only factorized lists (which are internally coerced to
    NumPy arrays). When factorizing pandas objects, the type of `uniques`
    will differ. For Categoricals, a `Categorical` is returned.

    >>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
    >>> codes, uniques = pd.factorize(cat)
    >>> codes
    array([0, 0, 1])
    >>> uniques
    ['a', 'c']
    Categories (3, object): ['a', 'b', 'c']

    Notice that ``'b'`` is in ``uniques.categories``, despite not being
    present in ``cat.values``.

    For all other pandas objects, an Index of the appropriate type is
    returned.

    >>> cat = pd.Series(['a', 'a', 'c'])
    >>> codes, uniques = pd.factorize(cat)
    >>> codes
    array([0, 0, 1])
    >>> uniques
    Index(['a', 'c'], dtype='object')

    If NaN is in the values, and we want to include NaN in the uniques of the
    values, it can be achieved by setting ``use_na_sentinel=False``.

    >>> values = np.array([1, 2, 1, np.nan])
    >>> codes, uniques = pd.factorize(values)  # default: use_na_sentinel=True
    >>> codes
    array([ 0,  1,  0, -1])
    >>> uniques
    array([1., 2.])

    >>> codes, uniques = pd.factorize(values, use_na_sentinel=False)
    >>> codes
    array([0, 1, 0, 2])
    >>> uniques
    array([ 1.,  2., nan])
    """
    # Implementation notes: This method is responsible for 3 things
    # 1.) coercing data to array-like (ndarray, Index, extension array)
    # 2.) factorizing codes and uniques
    # 3.) Maybe boxing the uniques in an Index
    #
    # Step 2 is dispatched to extension types (like Categorical). They are
    # responsible only for factorization. All data coercion, sorting and boxing
    # should happen here.
    if isinstance(values, (ABCIndex, ABCSeries)):
        return values.factorize(sort=sort, use_na_sentinel=use_na_sentinel)

    values = _ensure_arraylike(values, func_name="factorize")
    original = values

    if (
        isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
        and values.freq is not None
    ):
        # The presence of 'freq' means we can fast-path sorting and know there
        # aren't NAs
        codes, uniques = values.factorize(sort=sort)
        return codes, uniques

    elif not isinstance(values, np.ndarray):
        # i.e. ExtensionArray
        codes, uniques = values.factorize(use_na_sentinel=use_na_sentinel)

    else:
        values = np.asarray(values)  # convert DTA/TDA/MultiIndex

        if not use_na_sentinel and values.dtype == object:
            # factorize can now handle differentiating various types of null values.
            # These can only occur when the array has object dtype.
            # However, for backwards compatibility we only use the null for the
            # provided dtype. This may be revisited in the future, see GH#48476.
            null_mask = isna(values)
            if null_mask.any():
                na_value = na_value_for_dtype(values.dtype, compat=False)
                # Don't modify (potentially user-provided) array
                values = np.where(null_mask, na_value, values)

        codes, uniques = factorize_array(
            values,
            use_na_sentinel=use_na_sentinel,
            size_hint=size_hint,
        )

    if sort and len(uniques) > 0:
        uniques, codes = safe_sort(
            uniques,
            codes,
            use_na_sentinel=use_na_sentinel,
            assume_unique=True,
            verify=False,
        )

    uniques = _reconstruct_data(uniques, original.dtype, original)

    return codes, uniques
def value_counts(
    values,
    sort: bool = True,
    ascending: bool = False,
    normalize: bool = False,
    bins=None,
    dropna: bool = True,
) -> Series:
    """
    Compute a histogram of the counts of non-null values.

    Parameters
    ----------
    values : ndarray (1-d)
    sort : bool, default True
        Sort by values
    ascending : bool, default False
        Sort in ascending order
    normalize : bool, default False
        If True then compute a relative histogram
    bins : integer, optional
        Rather than count values, group them into half-open bins,
        convenience for pd.cut, only works with numeric data
    dropna : bool, default True
        Don't include counts of NaN

    Returns
    -------
    Series
    """
    warnings.warn(
        # GH#53493
        "pandas.value_counts is deprecated and will be removed in a "
        "future version. Use pd.Series(obj).value_counts() instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    return value_counts_internal(
        values,
        sort=sort,
        ascending=ascending,
        normalize=normalize,
        bins=bins,
        dropna=dropna,
    )


def value_counts_internal(
    values,
    sort: bool = True,
    ascending: bool = False,
    normalize: bool = False,
    bins=None,
    dropna: bool = True,
) -> Series:
    from pandas import (
        Index,
        Series,
    )

    index_name = getattr(values, "name", None)
    name = "proportion" if normalize else "count"

    if bins is not None:
        from pandas.core.reshape.tile import cut

        if isinstance(values, Series):
            values = values._values

        try:
            ii = cut(values, bins, include_lowest=True)
        except TypeError as err:
            raise TypeError("bins argument only works with numeric data.") from err

        # count, remove nulls (from the index), and sort by the interval bins
        result = ii.value_counts(dropna=dropna)
        result.name = name
        result = result[result.index.notna()]
        result.index = result.index.astype("interval")
        result = result.sort_index()

        # if we are dropna and we have NO values
        if dropna and (result._values == 0).all():
            result = result.iloc[0:0]

        # normalizing is by len of all (regardless of dropna)
        counts = np.array([len(ii)])

    else:
        if is_extension_array_dtype(values):
            # handle Categorical and sparse
            result = Series(values, copy=False)._values.value_counts(dropna=dropna)
            result.name = name
            result.index.name = index_name
            counts = result._values
            if not isinstance(counts, np.ndarray):
                # e.g. ArrowExtensionArray
                counts = np.asarray(counts)

        elif isinstance(values, ABCMultiIndex):
            # GH49558
            levels = list(range(values.nlevels))
            result = (
                Series(index=values, name=name)
                .groupby(level=levels, dropna=dropna)
                .size()
            )
            result.index.names = values.names
            counts = result._values

        else:
            values = _ensure_arraylike(values, func_name="value_counts")
            keys, counts, _ = value_counts_arraylike(values, dropna)
            if keys.dtype == np.float16:
                keys = keys.astype(np.float32)

            # For backwards compatibility, we let Index do its normal type
            # inference, _except_ if it infers from object to bool.
            idx = Index(keys)
            if idx.dtype == bool and keys.dtype == object:
                idx = idx.astype(object)
            elif (
                idx.dtype != keys.dtype  # noqa: PLR1714  # pylint: disable=R1714
                and idx.dtype != "string[pyarrow_numpy]"
            ):
                warnings.warn(
                    # GH#56161
                    "The behavior of value_counts with object-dtype is deprecated. "
                    "In a future version, this will *not* perform dtype inference "
                    "on the resulting index. To retain the old behavior, use "
                    "`result.index = result.index.infer_objects()`",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            idx.name = index_name

            result = Series(counts, index=idx, name=name, copy=False)

    if sort:
        result = result.sort_values(ascending=ascending)

    if normalize:
        result = result / counts.sum()

    return result


# Called once from SparseArray, otherwise could be private
def value_counts_arraylike(
    values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
) -> tuple[ArrayLike, npt.NDArray[np.int64], int]:
    """
    Parameters
    ----------
    values : np.ndarray
    dropna : bool
    mask : np.ndarray[bool] or None, default None

    Returns
    -------
    uniques : np.ndarray
    counts : np.ndarray[np.int64]
    na_counter : int
    """
    original = values
    values = _ensure_data(values)

    keys, counts, na_counter = htable.value_count(values, dropna, mask=mask)

    if needs_i8_conversion(original.dtype):
        # datetime, timedelta, or period

        if dropna:
            mask = keys != iNaT
            keys, counts = keys[mask], counts[mask]

    res_keys = _reconstruct_data(keys, original.dtype, original)
    return res_keys, counts, na_counter
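# Illustrative sketch (not part of the pandas source): the internal helper
# mirrors Series.value_counts without emitting the deprecation warning; the
# exact Series repr may vary across pandas versions.
#
#   >>> import numpy as np
#   >>> value_counts_internal(np.array([1, 1, 2]))
#   1    2
#   2    1
#   Name: count, dtype: int64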
def duplicated(
    values: ArrayLike,
    keep: Literal["first", "last", False] = "first",
    mask: npt.NDArray[np.bool_] | None = None,
) -> npt.NDArray[np.bool_]:
    """
    Return boolean ndarray denoting duplicate values.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        Array over which to check for duplicate values.
    keep : {'first', 'last', False}, default 'first'
        - ``first`` : Mark duplicates as ``True`` except for the first
          occurrence.
        - ``last`` : Mark duplicates as ``True`` except for the last
          occurrence.
        - False : Mark all duplicates as ``True``.
    mask : ndarray[bool], optional
        array indicating which elements to exclude from checking

    Returns
    -------
    duplicated : ndarray[bool]
    """
    values = _ensure_data(values)
    return htable.duplicated(values, keep=keep, mask=mask)


def mode(
    values: ArrayLike, dropna: bool = True, mask: npt.NDArray[np.bool_] | None = None
) -> ArrayLike:
    """
    Returns the mode(s) of an array.

    Parameters
    ----------
    values : array-like
        Array from which to compute the mode(s).
    dropna : bool, default True
        Don't consider counts of NaN/NaT.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    values = _ensure_arraylike(values, func_name="mode")
    original = values

    if needs_i8_conversion(values.dtype):
        # Got here with ndarray; dispatch to DatetimeArray/TimedeltaArray.
        values = ensure_wrapped_if_datetimelike(values)
        values = cast("ExtensionArray", values)
        return values._mode(dropna=dropna)

    values = _ensure_data(values)

    npresult, res_mask = htable.mode(values, dropna=dropna, mask=mask)
    if res_mask is not None:
        return npresult, res_mask  # type: ignore[return-value]

    try:
        npresult = np.sort(npresult)
    except TypeError as err:
        warnings.warn(
            f"Unable to sort modes: {err}",
            stacklevel=find_stack_level(),
        )

    result = _reconstruct_data(npresult, original.dtype, original)
    return result
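# Illustrative sketch (not part of the pandas source): with keep="first",
# only repeats after the first occurrence are flagged.
#
#   >>> import numpy as np
#   >>> duplicated(np.array([1, 2, 1, 1]), keep="first")
#   array([False, False,  True,  True])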
def rank(
    values: ArrayLike,
    axis: AxisInt = 0,
    method: str = "average",
    na_option: str = "keep",
    ascending: bool = True,
    pct: bool = False,
) -> npt.NDArray[np.float64]:
    """
    Rank the values along a given axis.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        Array whose values will be ranked. The number of dimensions in this
        array must not exceed 2.
    axis : int, default 0
        Axis over which to perform rankings.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        The method by which tiebreaks are broken during the ranking.
    na_option : {'keep', 'top'}, default 'keep'
        The method by which NaNs are placed in the ranking.
        - ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
          are ranked at the top
    ascending : bool, default True
        Whether or not the elements should be ranked in ascending order.
    pct : bool, default False
        Whether or not to display the returned rankings in integer form
        (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
    """
    is_datetimelike = needs_i8_conversion(values.dtype)
    values = _ensure_data(values)

    if values.ndim == 1:
        ranks = algos.rank_1d(
            values,
            is_datetimelike=is_datetimelike,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    elif values.ndim == 2:
        ranks = algos.rank_2d(
            values,
            axis=axis,
            is_datetimelike=is_datetimelike,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    else:
        raise TypeError("Arrays with ndim > 2 are not supported.")

    return ranks
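# Illustrative sketch (not part of the pandas source): average ranks for a
# 1-D float array without ties.
#
#   >>> import numpy as np
#   >>> rank(np.array([3.0, 1.0, 2.0]))
#   array([3., 1., 2.])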
# ---- #
# take #
# ---- #


def take(
    arr,
    indices: TakeIndexer,
    axis: AxisInt = 0,
    allow_fill: bool = False,
    fill_value=None,
):
    """
    Take elements from an array.

    Parameters
    ----------
    arr : array-like or scalar value
        Non array-likes (sequences/scalars without a dtype) are coerced
        to an ndarray.

        .. deprecated:: 2.1.0
            Passing an argument other than a numpy.ndarray, ExtensionArray,
            Index, or Series is deprecated.

    indices : sequence of int or one-dimensional np.ndarray of int
        Indices to be taken.
    axis : int, default 0
        The axis over which to select values.
    allow_fill : bool, default False
        How to handle negative values in `indices`.

        * False: negative values in `indices` indicate positional indices
          from the right (the default). This is similar to :func:`numpy.take`.

        * True: negative values in `indices` indicate
          missing values. These values are set to `fill_value`. Any other
          negative values raise a ``ValueError``.

    fill_value : any, optional
        Fill value to use for NA-indices when `allow_fill` is True.
        This may be ``None``, in which case the default NA value for
        the type (``self.dtype.na_value``) is used.

        For multi-dimensional `arr`, each *element* is filled with
        `fill_value`.

    Returns
    -------
    ndarray or ExtensionArray
        Same type as the input.

    Raises
    ------
    IndexError
        When `indices` is out of bounds for the array.
    ValueError
        When the indexer contains negative values other than ``-1``
        and `allow_fill` is True.

    Notes
    -----
    When `allow_fill` is False, `indices` may be whatever dimensionality
    is accepted by NumPy for `arr`.

    When `allow_fill` is True, `indices` should be 1-D.

    See Also
    --------
    numpy.take : Take elements from an array along an axis.

    Examples
    --------
    >>> import pandas as pd

    With the default ``allow_fill=False``, negative numbers indicate
    positional indices from the right.

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1])
    array([10, 10, 30])

    Setting ``allow_fill=True`` will place `fill_value` in those positions.

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
    array([10., 10., nan])

    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
    ...                        fill_value=-10)
    array([ 10,  10, -10])
    """
    if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)):
        # GH#52981
        warnings.warn(
            "pd.api.extensions.take accepting non-standard inputs is deprecated "
            "and will raise in a future version. Pass either a numpy.ndarray, "
            "ExtensionArray, Index, or Series instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    if not is_array_like(arr):
        arr = np.asarray(arr)

    indices = ensure_platform_int(indices)

    if allow_fill:
        # Pandas style, -1 means NA
        validate_indices(indices, arr.shape[axis])
        result = take_nd(
            arr, indices, axis=axis, allow_fill=True, fill_value=fill_value
        )
    else:
        # NumPy style
        result = arr.take(indices, axis=axis)
    return result
# ------------ #
# searchsorted #
# ------------ #


def searchsorted(
    arr: ArrayLike,
    value: NumpyValueArrayLike | ExtensionArray,
    side: Literal["left", "right"] = "left",
    sorter: NumpySorter | None = None,
) -> npt.NDArray[np.intp] | np.intp:
    """
    Find indices where elements should be inserted to maintain order.

    Find the indices into a sorted array `arr` (a) such that, if the
    corresponding elements in `value` were inserted before the indices,
    the order of `arr` would be preserved.

    Assuming that `arr` is sorted:

    ======  ================================
    `side`  returned index `i` satisfies
    ======  ================================
    left    ``arr[i-1] < value <= self[i]``
    right   ``arr[i-1] <= value < self[i]``
    ======  ================================

    Parameters
    ----------
    arr : np.ndarray, ExtensionArray, Series
        Input array. If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        that sort it.
    value : array-like or scalar
        Values to insert into `arr`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index. If there is no suitable
        index, return either 0 or N (where N is the length of `self`).
    sorter : 1-D array-like, optional
        Optional array of integer indices that sort array a into ascending
        order. They are typically the result of argsort.

    Returns
    -------
    array of ints or int
        If value is array-like, array of insertion points.
        If value is scalar, a single integer.

    See Also
    --------
    numpy.searchsorted : Similar method from NumPy.
    """
    if sorter is not None:
        sorter = ensure_platform_int(sorter)

    if (
        isinstance(arr, np.ndarray)
        and arr.dtype.kind in "iu"
        and (is_integer(value) or is_integer_dtype(value))
    ):
        # if `arr` and `value` have different dtypes, `arr` would be
        # recast by numpy, causing a slow search.
        # Before searching below, we therefore try to give `value` the
        # same dtype as `arr`, while guarding against integer overflows.
        iinfo = np.iinfo(arr.dtype.type)
        value_arr = np.array([value]) if is_integer(value) else np.array(value)
        if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():
            # value within bounds, so no overflow, so can convert value dtype
            # to dtype of arr
            dtype = arr.dtype
        else:
            dtype = value_arr.dtype

        if is_integer(value):
            # We know that value is int
            value = cast(int, dtype.type(value))
        else:
            value = pd_array(cast(ArrayLike, value), dtype=dtype)
    else:
        # E.g. if `arr` is an array with dtype='datetime64[ns]'
        # and `value` is a pd.Timestamp, we may need to convert value
        arr = ensure_wrapped_if_datetimelike(arr)

    # Argument 1 to "searchsorted" of "ndarray" has incompatible type
    # "Union[NumpyValueArrayLike, ExtensionArray]"; expected "NumpyValueArrayLike"
    return arr.searchsorted(value, side=side, sorter=sorter)  # type: ignore[arg-type]
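# Illustrative sketch (not part of the pandas source): an integer value is
# cast to the array's dtype before delegating to ndarray.searchsorted.
#
#   >>> import numpy as np
#   >>> searchsorted(np.array([1, 3, 5]), 4)
#   2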
# ---- #
# diff #
# ---- #

_diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}


def diff(arr, n: int, axis: AxisInt = 0):
    """
    difference of n periods between values,
    analogous to s - s.shift(n)

    Parameters
    ----------
    arr : ndarray or ExtensionArray
    n : int
        number of periods
    axis : {0, 1}
        axis to shift on

    Returns
    -------
    shifted
    """

    n = int(n)
    na = np.nan
    dtype = arr.dtype

    is_bool = is_bool_dtype(dtype)
    if is_bool:
        op = operator.xor
    else:
        op = operator.sub

    if isinstance(dtype, NumpyEADtype):
        # NumpyExtensionArray cannot necessarily hold shifted versions of itself.
        arr = arr.to_numpy()
        dtype = arr.dtype

    if not isinstance(arr, np.ndarray):
        # i.e. ExtensionArray
        if hasattr(arr, f"__{op.__name__}__"):
            if axis != 0:
                raise ValueError(f"cannot diff {type(arr).__name__} on axis={axis}")
            return op(arr, arr.shift(n))
        else:
            raise TypeError(
                f"{type(arr).__name__} has no 'diff' method. "
                "Convert to a suitable dtype prior to calling 'diff'."
            )

    is_timedelta = False
    if arr.dtype.kind in "mM":
        dtype = np.int64
        arr = arr.view("i8")
        na = iNaT
        is_timedelta = True

    elif is_bool:
        # We have to cast in order to be able to hold np.nan
        dtype = np.object_

    elif dtype.kind in "iu":
        # We have to cast in order to be able to hold np.nan

        # int8, int16 are incompatible with float64,
        # see https://github.com/cython/cython/issues/2646
        if arr.dtype.name in ["int8", "int16"]:
            dtype = np.float32
        else:
            dtype = np.float64

    orig_ndim = arr.ndim
    if orig_ndim == 1:
        # reshape so we can always use algos.diff_2d
        arr = arr.reshape(-1, 1)
        # TODO: require axis == 0

    dtype = np.dtype(dtype)
    out_arr = np.empty(arr.shape, dtype=dtype)

    na_indexer = [slice(None)] * 2
    na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
    out_arr[tuple(na_indexer)] = na

    if arr.dtype.name in _diff_special:
        # TODO: can diff_2d dtype specialization troubles be fixed by defining
        # out_arr inside diff_2d?
        algos.diff_2d(arr, out_arr, n, axis, datetimelike=is_timedelta)
    else:
        # To keep mypy happy, _res_indexer is a list while res_indexer is
        # a tuple, ditto for lag_indexer.
        _res_indexer = [slice(None)] * 2
        _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
        res_indexer = tuple(_res_indexer)

        _lag_indexer = [slice(None)] * 2
        _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
        lag_indexer = tuple(_lag_indexer)

        out_arr[res_indexer] = op(arr[res_indexer], arr[lag_indexer])

    if is_timedelta:
        out_arr = out_arr.view("timedelta64[ns]")

    if orig_ndim == 1:
        out_arr = out_arr[:, 0]
    return out_arr
1445 |
+
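# Illustrative sketch (editorial note, not part of the original module): for a
# 1D int64 array, `diff` upcasts to float64 so the leading positions can hold
# NaN, e.g.
#
#   >>> import numpy as np
#   >>> diff(np.array([1, 2, 4, 7], dtype="int64"), 1)
#   array([nan,  1.,  2.,  3.])
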
# --------------------------------------------------------------------
# Helper functions


# Note: safe_sort is in algorithms.py instead of sorting.py because it is
# low-dependency, is used in this module, and uses private methods from
# this module.
def safe_sort(
    values: Index | ArrayLike,
    codes: npt.NDArray[np.intp] | None = None,
    use_na_sentinel: bool = True,
    assume_unique: bool = False,
    verify: bool = True,
) -> AnyArrayLike | tuple[AnyArrayLike, np.ndarray]:
    """
    Sort ``values`` and reorder corresponding ``codes``.

    ``values`` should be unique if ``codes`` is not None.
    Safe for use with mixed types (int, str), orders ints before strs.

    Parameters
    ----------
    values : list-like
        Sequence; must be unique if ``codes`` is not None.
    codes : np.ndarray[intp] or None, default None
        Indices to ``values``. All out of bound indices are treated as
        "not found" and will be masked with ``-1``.
    use_na_sentinel : bool, default True
        If True, the sentinel -1 will be used for NaN values. If False,
        NaN values will be encoded as non-negative integers and will not drop the
        NaN from the uniques of the values.
    assume_unique : bool, default False
        When True, ``values`` are assumed to be unique, which can speed up
        the calculation. Ignored when ``codes`` is None.
    verify : bool, default True
        Check if codes are out of bound for the values and put out of bound
        codes equal to ``-1``. If ``verify=False``, it is assumed there
        are no out of bound codes. Ignored when ``codes`` is None.

    Returns
    -------
    ordered : AnyArrayLike
        Sorted ``values``
    new_codes : ndarray
        Reordered ``codes``; returned when ``codes`` is not None.

    Raises
    ------
    TypeError
        * If ``values`` is not list-like or if ``codes`` is neither None
          nor list-like
        * If ``values`` cannot be sorted
    ValueError
        * If ``codes`` is not None and ``values`` contain duplicates.
    """
    if not isinstance(values, (np.ndarray, ABCExtensionArray, ABCIndex)):
        raise TypeError(
            "Only np.ndarray, ExtensionArray, and Index objects are allowed to "
            "be passed to safe_sort as values"
        )

    sorter = None
    ordered: AnyArrayLike

    if (
        not isinstance(values.dtype, ExtensionDtype)
        and lib.infer_dtype(values, skipna=False) == "mixed-integer"
    ):
        ordered = _sort_mixed(values)
    else:
        try:
            sorter = values.argsort()
            ordered = values.take(sorter)
        except (TypeError, decimal.InvalidOperation):
            # Previous sorters failed or were not applicable, try `_sort_mixed`
            # which would work, but which fails for special case of 1d arrays
            # with tuples.
            if values.size and isinstance(values[0], tuple):
                # error: Argument 1 to "_sort_tuples" has incompatible type
                # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected
                # "ndarray[Any, Any]"
                ordered = _sort_tuples(values)  # type: ignore[arg-type]
            else:
                ordered = _sort_mixed(values)

    # codes:

    if codes is None:
        return ordered

    if not is_list_like(codes):
        raise TypeError(
            "Only list-like objects or None are allowed to "
            "be passed to safe_sort as codes"
        )
    codes = ensure_platform_int(np.asarray(codes))

    if not assume_unique and not len(unique(values)) == len(values):
        raise ValueError("values should be unique if codes is not None")

    if sorter is None:
        # mixed types
        # error: Argument 1 to "_get_hashtable_algo" has incompatible type
        # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected
        # "ndarray[Any, Any]"
        hash_klass, values = _get_hashtable_algo(values)  # type: ignore[arg-type]
        t = hash_klass(len(values))
        t.map_locations(values)
        sorter = ensure_platform_int(t.lookup(ordered))

    if use_na_sentinel:
        # take_nd is faster, but only works for na_sentinels of -1
        order2 = sorter.argsort()
        if verify:
            mask = (codes < -len(values)) | (codes >= len(values))
            codes[mask] = 0
        else:
            mask = None
        new_codes = take_nd(order2, codes, fill_value=-1)
    else:
        reverse_indexer = np.empty(len(sorter), dtype=int)
        reverse_indexer.put(sorter, np.arange(len(sorter)))
        # Out of bound indices will be masked with `-1` next, so we
        # may deal with them here without performance loss using `mode='wrap'`
        new_codes = reverse_indexer.take(codes, mode="wrap")

        if use_na_sentinel:
            mask = codes == -1
            if verify:
                mask = mask | (codes < -len(values)) | (codes >= len(values))

    if use_na_sentinel and mask is not None:
        np.putmask(new_codes, mask, -1)

    return ordered, ensure_platform_int(new_codes)

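# Illustrative sketch (editorial note, not part of the original module):
# sorting uniques while remapping codes that point into them, e.g.
#
#   >>> import numpy as np
#   >>> safe_sort(np.array(["b", "a", "c"], dtype=object),
#   ...           codes=np.array([0, 1, 2, 0], dtype=np.intp))
#   (array(['a', 'b', 'c'], dtype=object), array([1, 0, 2, 1]))
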
def _sort_mixed(values) -> AnyArrayLike:
    """order ints before strings before nulls in 1d arrays"""
    str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
    null_pos = np.array([isna(x) for x in values], dtype=bool)
    num_pos = ~str_pos & ~null_pos
    str_argsort = np.argsort(values[str_pos])
    num_argsort = np.argsort(values[num_pos])
    # convert boolean arrays to positional indices, then order by underlying values
    str_locs = str_pos.nonzero()[0].take(str_argsort)
    num_locs = num_pos.nonzero()[0].take(num_argsort)
    null_locs = null_pos.nonzero()[0]
    locs = np.concatenate([num_locs, str_locs, null_locs])
    return values.take(locs)

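# Illustrative sketch (editorial note, not part of the original module):
#
#   >>> import numpy as np
#   >>> _sort_mixed(np.array(["b", 2, "a", 1, np.nan], dtype=object))
#   array([1, 2, 'a', 'b', nan], dtype=object)
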
def _sort_tuples(values: np.ndarray) -> np.ndarray:
    """
    Convert array of tuples (1d) to array of arrays (2d).
    We need to keep the columns separately as they contain different types and
    nans (can't use `np.sort` as it may fail when str and nan are mixed in a
    column as types cannot be compared).
    """
    from pandas.core.internals.construction import to_arrays
    from pandas.core.sorting import lexsort_indexer

    arrays, _ = to_arrays(values, None)
    indexer = lexsort_indexer(arrays, orders=True)
    return values[indexer]

def union_with_duplicates(
    lvals: ArrayLike | Index, rvals: ArrayLike | Index
) -> ArrayLike | Index:
    """
    Extracts the union from lvals and rvals with respect to duplicates and nans in
    both arrays.

    Parameters
    ----------
    lvals : np.ndarray or ExtensionArray
        Left values, which are ordered in front.
    rvals : np.ndarray or ExtensionArray
        Right values, ordered after lvals.

    Returns
    -------
    np.ndarray or ExtensionArray
        Containing the unsorted union of both arrays.

    Notes
    -----
    Caller is responsible for ensuring lvals.dtype == rvals.dtype.
    """
    from pandas import Series

    with warnings.catch_warnings():
        # filter warning from object dtype inference; we will end up discarding
        # the index here, so the deprecation does not affect the end result here.
        warnings.filterwarnings(
            "ignore",
            "The behavior of value_counts with object-dtype is deprecated",
            category=FutureWarning,
        )
        l_count = value_counts_internal(lvals, dropna=False)
        r_count = value_counts_internal(rvals, dropna=False)
    l_count, r_count = l_count.align(r_count, fill_value=0)
    final_count = np.maximum(l_count.values, r_count.values)
    final_count = Series(final_count, index=l_count.index, dtype="int", copy=False)
    if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex):
        unique_vals = lvals.append(rvals).unique()
    else:
        if isinstance(lvals, ABCIndex):
            lvals = lvals._values
        if isinstance(rvals, ABCIndex):
            rvals = rvals._values
        # error: List item 0 has incompatible type "Union[ExtensionArray,
        # ndarray[Any, Any], Index]"; expected "Union[ExtensionArray,
        # ndarray[Any, Any]]"
        combined = concat_compat([lvals, rvals])  # type: ignore[list-item]
        unique_vals = unique(combined)
        unique_vals = ensure_wrapped_if_datetimelike(unique_vals)
    repeats = final_count.reindex(unique_vals).values
    return np.repeat(unique_vals, repeats)

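# Illustrative sketch (editorial note, not part of the original module): each
# value is repeated max(count_left, count_right) times, keeping first-seen order:
#
#   >>> import numpy as np
#   >>> union_with_duplicates(np.array([1, 1, 2]), np.array([2, 2, 3]))
#   array([1, 1, 2, 2, 3])
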
def map_array(
    arr: ArrayLike,
    mapper,
    na_action: Literal["ignore"] | None = None,
    convert: bool = True,
) -> np.ndarray | ExtensionArray | Index:
    """
    Map values using an input mapping or function.

    Parameters
    ----------
    arr : ArrayLike
        The array whose values are mapped.
    mapper : function, dict, or Series
        Mapping correspondence.
    na_action : {None, 'ignore'}, default None
        If 'ignore', propagate NA values, without passing them to the
        mapping correspondence.
    convert : bool, default True
        Try to find better dtype for elementwise function results. If
        False, leave as dtype=object.

    Returns
    -------
    Union[ndarray, Index, ExtensionArray]
        The output of the mapping function applied to the array.
        If the function returns a tuple with more than one element
        a MultiIndex will be returned.
    """
    if na_action not in (None, "ignore"):
        msg = f"na_action must either be 'ignore' or None, {na_action} was passed"
        raise ValueError(msg)

    # we can fastpath dict/Series to an efficient map
    # as we know that we are not going to have to yield
    # python types
    if is_dict_like(mapper):
        if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
            # If a dictionary subclass defines a default value method,
            # convert mapper to a lookup function (GH #15999).
            dict_with_default = mapper
            mapper = lambda x: dict_with_default[
                np.nan if isinstance(x, float) and np.isnan(x) else x
            ]
        else:
            # Dictionary does not have a default. Thus it's safe to
            # convert to a Series for efficiency.
            # we specify the keys here to handle the
            # possibility that they are tuples

            # The return value of mapping with an empty mapper is
            # expected to be pd.Series(np.nan, ...). As np.nan is
            # of dtype float64 the return value of this method should
            # be float64 as well
            from pandas import Series

            if len(mapper) == 0:
                mapper = Series(mapper, dtype=np.float64)
            else:
                mapper = Series(mapper)

    if isinstance(mapper, ABCSeries):
        if na_action == "ignore":
            mapper = mapper[mapper.index.notna()]

        # Since values were input this means we came from either
        # a dict or a series and mapper should be an index
        indexer = mapper.index.get_indexer(arr)
        new_values = take_nd(mapper._values, indexer)

        return new_values

    if not len(arr):
        return arr.copy()

    # we must convert to python types
    values = arr.astype(object, copy=False)
    if na_action is None:
        return lib.map_infer(values, mapper, convert=convert)
    else:
        return lib.map_infer_mask(
            values, mapper, mask=isna(values).view(np.uint8), convert=convert
        )
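# Illustrative sketch (editorial note, not part of the original module): a dict
# mapper takes the Series fastpath, while a callable goes through map_infer:
#
#   >>> import numpy as np
#   >>> map_array(np.array(["cat", "dog"], dtype=object), {"cat": 1, "dog": 2})
#   array([1, 2])
#   >>> map_array(np.array([1, 2], dtype=object), lambda x: x * 10)
#   array([10, 20])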
env-llmeval/lib/python3.10/site-packages/pandas/core/api.py
ADDED
@@ -0,0 +1,140 @@
from pandas._libs import (
    NaT,
    Period,
    Timedelta,
    Timestamp,
)
from pandas._libs.missing import NA

from pandas.core.dtypes.dtypes import (
    ArrowDtype,
    CategoricalDtype,
    DatetimeTZDtype,
    IntervalDtype,
    PeriodDtype,
)
from pandas.core.dtypes.missing import (
    isna,
    isnull,
    notna,
    notnull,
)

from pandas.core.algorithms import (
    factorize,
    unique,
    value_counts,
)
from pandas.core.arrays import Categorical
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import (
    Float32Dtype,
    Float64Dtype,
)
from pandas.core.arrays.integer import (
    Int8Dtype,
    Int16Dtype,
    Int32Dtype,
    Int64Dtype,
    UInt8Dtype,
    UInt16Dtype,
    UInt32Dtype,
    UInt64Dtype,
)
from pandas.core.arrays.string_ import StringDtype
from pandas.core.construction import array
from pandas.core.flags import Flags
from pandas.core.groupby import (
    Grouper,
    NamedAgg,
)
from pandas.core.indexes.api import (
    CategoricalIndex,
    DatetimeIndex,
    Index,
    IntervalIndex,
    MultiIndex,
    PeriodIndex,
    RangeIndex,
    TimedeltaIndex,
)
from pandas.core.indexes.datetimes import (
    bdate_range,
    date_range,
)
from pandas.core.indexes.interval import (
    Interval,
    interval_range,
)
from pandas.core.indexes.period import period_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexing import IndexSlice
from pandas.core.series import Series
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.numeric import to_numeric
from pandas.core.tools.timedeltas import to_timedelta

from pandas.io.formats.format import set_eng_float_format
from pandas.tseries.offsets import DateOffset

# DataFrame needs to be imported after NamedAgg to avoid a circular import
from pandas.core.frame import DataFrame  # isort:skip

__all__ = [
    "array",
    "ArrowDtype",
    "bdate_range",
    "BooleanDtype",
    "Categorical",
    "CategoricalDtype",
    "CategoricalIndex",
    "DataFrame",
    "DateOffset",
    "date_range",
    "DatetimeIndex",
    "DatetimeTZDtype",
    "factorize",
    "Flags",
    "Float32Dtype",
    "Float64Dtype",
    "Grouper",
    "Index",
    "IndexSlice",
    "Int16Dtype",
    "Int32Dtype",
    "Int64Dtype",
    "Int8Dtype",
    "Interval",
    "IntervalDtype",
    "IntervalIndex",
    "interval_range",
    "isna",
    "isnull",
    "MultiIndex",
    "NA",
    "NamedAgg",
    "NaT",
    "notna",
    "notnull",
    "Period",
    "PeriodDtype",
    "PeriodIndex",
    "period_range",
    "RangeIndex",
    "Series",
    "set_eng_float_format",
    "StringDtype",
    "Timedelta",
    "TimedeltaIndex",
    "timedelta_range",
    "Timestamp",
    "to_datetime",
    "to_numeric",
    "to_timedelta",
    "UInt16Dtype",
    "UInt32Dtype",
    "UInt64Dtype",
    "UInt8Dtype",
    "unique",
    "value_counts",
]
env-llmeval/lib/python3.10/site-packages/pandas/core/apply.py
ADDED
@@ -0,0 +1,2062 @@
from __future__ import annotations

import abc
from collections import defaultdict
import functools
from functools import partial
import inspect
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    cast,
)
import warnings

import numpy as np

from pandas._config import option_context

from pandas._libs import lib
from pandas._libs.internals import BlockValuesRefs
from pandas._typing import (
    AggFuncType,
    AggFuncTypeBase,
    AggFuncTypeDict,
    AggObjType,
    Axis,
    AxisInt,
    NDFrameT,
    npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import SpecificationError
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
    is_dict_like,
    is_extension_array_dtype,
    is_list_like,
    is_numeric_dtype,
    is_sequence,
)
from pandas.core.dtypes.dtypes import (
    CategoricalDtype,
    ExtensionDtype,
)
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCNDFrame,
    ABCSeries,
)

from pandas.core._numba.executor import generate_apply_looper
import pandas.core.common as com
from pandas.core.construction import ensure_wrapped_if_datetimelike

if TYPE_CHECKING:
    from collections.abc import (
        Generator,
        Hashable,
        Iterable,
        MutableMapping,
        Sequence,
    )

    from pandas import (
        DataFrame,
        Index,
        Series,
    )
    from pandas.core.groupby import GroupBy
    from pandas.core.resample import Resampler
    from pandas.core.window.rolling import BaseWindow


ResType = dict[int, Any]


def frame_apply(
    obj: DataFrame,
    func: AggFuncType,
    axis: Axis = 0,
    raw: bool = False,
    result_type: str | None = None,
    by_row: Literal[False, "compat"] = "compat",
    engine: str = "python",
    engine_kwargs: dict[str, bool] | None = None,
    args=None,
    kwargs=None,
) -> FrameApply:
    """construct and return a row or column based frame apply object"""
    axis = obj._get_axis_number(axis)
    klass: type[FrameApply]
    if axis == 0:
        klass = FrameRowApply
    elif axis == 1:
        klass = FrameColumnApply

    _, func, _, _ = reconstruct_func(func, **kwargs)
    assert func is not None

    return klass(
        obj,
        func,
        raw=raw,
        result_type=result_type,
        by_row=by_row,
        engine=engine,
        engine_kwargs=engine_kwargs,
        args=args,
        kwargs=kwargs,
    )

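# Illustrative sketch (editorial note, not part of the original module):
# DataFrame.apply delegates to this factory, roughly
#
#   op = frame_apply(df, func=np.sum, axis=1)
#   result = op.apply()
#
# axis=0 yields a FrameRowApply, axis=1 a FrameColumnApply.
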
class Apply(metaclass=abc.ABCMeta):
    axis: AxisInt

    def __init__(
        self,
        obj: AggObjType,
        func: AggFuncType,
        raw: bool,
        result_type: str | None,
        *,
        by_row: Literal[False, "compat", "_compat"] = "compat",
        engine: str = "python",
        engine_kwargs: dict[str, bool] | None = None,
        args,
        kwargs,
    ) -> None:
        self.obj = obj
        self.raw = raw

        assert by_row is False or by_row in ["compat", "_compat"]
        self.by_row = by_row

        self.args = args or ()
        self.kwargs = kwargs or {}

        self.engine = engine
        self.engine_kwargs = {} if engine_kwargs is None else engine_kwargs

        if result_type not in [None, "reduce", "broadcast", "expand"]:
            raise ValueError(
                "invalid value for result_type, must be one "
                "of {None, 'reduce', 'broadcast', 'expand'}"
            )

        self.result_type = result_type

        self.func = func

    @abc.abstractmethod
    def apply(self) -> DataFrame | Series:
        pass

    @abc.abstractmethod
    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        pass

    @abc.abstractmethod
    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        pass

    def agg(self) -> DataFrame | Series | None:
        """
        Provide an implementation for the aggregators.

        Returns
        -------
        Result of aggregation, or None if agg cannot be performed by
        this method.
        """
        obj = self.obj
        func = self.func
        args = self.args
        kwargs = self.kwargs

        if isinstance(func, str):
            return self.apply_str()

        if is_dict_like(func):
            return self.agg_dict_like()
        elif is_list_like(func):
            # we require a list, but not a 'str'
            return self.agg_list_like()

        if callable(func):
            f = com.get_cython_func(func)
            if f and not args and not kwargs:
                warn_alias_replacement(obj, func, f)
                return getattr(obj, f)()

        # caller can react
        return None

    def transform(self) -> DataFrame | Series:
        """
        Transform a DataFrame or Series.

        Returns
        -------
        DataFrame or Series
            Result of applying ``func`` along the given axis of the
            Series or DataFrame.

        Raises
        ------
        ValueError
            If the transform function fails or does not transform.
        """
        obj = self.obj
        func = self.func
        axis = self.axis
        args = self.args
        kwargs = self.kwargs

        is_series = obj.ndim == 1

        if obj._get_axis_number(axis) == 1:
            assert not is_series
            return obj.T.transform(func, 0, *args, **kwargs).T

        if is_list_like(func) and not is_dict_like(func):
            func = cast(list[AggFuncTypeBase], func)
            # Convert func to an equivalent dict
            if is_series:
                func = {com.get_callable_name(v) or v: v for v in func}
            else:
                func = {col: func for col in obj}

        if is_dict_like(func):
            func = cast(AggFuncTypeDict, func)
            return self.transform_dict_like(func)

        # func is either str or callable
        func = cast(AggFuncTypeBase, func)
        try:
            result = self.transform_str_or_callable(func)
        except TypeError:
            raise
        except Exception as err:
            raise ValueError("Transform function failed") from err

        # Functions that transform may return empty Series/DataFrame
        # when the dtype is not appropriate
        if (
            isinstance(result, (ABCSeries, ABCDataFrame))
            and result.empty
            and not obj.empty
        ):
            raise ValueError("Transform function failed")
        # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type
        # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,
        # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame,
        # Series]"
        if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
            obj.index  # type: ignore[arg-type]
        ):
            raise ValueError("Function did not transform")

        return result

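    # Illustrative sketch (editorial note, not part of the original module):
    # a list-like func is first normalized to a dict keyed by column (or by
    # callable name for a Series), so
    #
    #   df.transform(["sqrt"])
    #
    # behaves like df.transform({col: ["sqrt"] for col in df.columns}) and
    # returns a frame with a (column, function) MultiIndex on the columns.
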
    def transform_dict_like(self, func) -> DataFrame:
        """
        Compute transform in the case of a dict-like func
        """
        from pandas.core.reshape.concat import concat

        obj = self.obj
        args = self.args
        kwargs = self.kwargs

        # transform is currently only for Series/DataFrame
        assert isinstance(obj, ABCNDFrame)

        if len(func) == 0:
            raise ValueError("No transform functions were provided")

        func = self.normalize_dictlike_arg("transform", obj, func)

        results: dict[Hashable, DataFrame | Series] = {}
        for name, how in func.items():
            colg = obj._gotitem(name, ndim=1)
            results[name] = colg.transform(how, 0, *args, **kwargs)
        return concat(results, axis=1)

    def transform_str_or_callable(self, func) -> DataFrame | Series:
        """
        Compute transform in the case of a string or callable func
        """
        obj = self.obj
        args = self.args
        kwargs = self.kwargs

        if isinstance(func, str):
            return self._apply_str(obj, func, *args, **kwargs)

        if not args and not kwargs:
            f = com.get_cython_func(func)
            if f:
                warn_alias_replacement(obj, func, f)
                return getattr(obj, f)()

        # Two possible ways to use a UDF - apply or call directly
        try:
            return obj.apply(func, args=args, **kwargs)
        except Exception:
            return func(obj, *args, **kwargs)

    def agg_list_like(self) -> DataFrame | Series:
        """
        Compute aggregation in the case of a list-like argument.

        Returns
        -------
        Result of aggregation.
        """
        return self.agg_or_apply_list_like(op_name="agg")

    def compute_list_like(
        self,
        op_name: Literal["agg", "apply"],
        selected_obj: Series | DataFrame,
        kwargs: dict[str, Any],
    ) -> tuple[list[Hashable] | Index, list[Any]]:
        """
        Compute agg/apply results for list-like input.

        Parameters
        ----------
        op_name : {"agg", "apply"}
            Operation being performed.
        selected_obj : Series or DataFrame
            Data to perform operation on.
        kwargs : dict
            Keyword arguments to pass to the functions.

        Returns
        -------
        keys : list[Hashable] or Index
            Index labels for result.
        results : list
            Data for result. When aggregating with a Series, this can contain any
            Python objects.
        """
        func = cast(list[AggFuncTypeBase], self.func)
        obj = self.obj

        results = []
        keys = []

        # degenerate case
        if selected_obj.ndim == 1:
            for a in func:
                colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
                args = (
                    [self.axis, *self.args]
                    if include_axis(op_name, colg)
                    else self.args
                )
                new_res = getattr(colg, op_name)(a, *args, **kwargs)
                results.append(new_res)

                # make sure we find a good name
                name = com.get_callable_name(a) or a
                keys.append(name)

        else:
            indices = []
            for index, col in enumerate(selected_obj):
                colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
                args = (
                    [self.axis, *self.args]
                    if include_axis(op_name, colg)
                    else self.args
                )
                new_res = getattr(colg, op_name)(func, *args, **kwargs)
                results.append(new_res)
                indices.append(index)
            # error: Incompatible types in assignment (expression has type "Any |
            # Index", variable has type "list[Any | Callable[..., Any] | str]")
            keys = selected_obj.columns.take(indices)  # type: ignore[assignment]

        return keys, results

    def wrap_results_list_like(
        self, keys: Iterable[Hashable], results: list[Series | DataFrame]
    ):
        from pandas.core.reshape.concat import concat

        obj = self.obj

        try:
            return concat(results, keys=keys, axis=1, sort=False)
        except TypeError as err:
            # we are concatting non-NDFrame objects,
            # e.g. a list of scalars
            from pandas import Series

            result = Series(results, index=keys, name=obj.name)
            if is_nested_object(result):
                raise ValueError(
                    "cannot combine transform and aggregation operations"
                ) from err
            return result

    def agg_dict_like(self) -> DataFrame | Series:
        """
        Compute aggregation in the case of a dict-like argument.

        Returns
        -------
        Result of aggregation.
        """
        return self.agg_or_apply_dict_like(op_name="agg")

    def compute_dict_like(
        self,
        op_name: Literal["agg", "apply"],
        selected_obj: Series | DataFrame,
        selection: Hashable | Sequence[Hashable],
        kwargs: dict[str, Any],
    ) -> tuple[list[Hashable], list[Any]]:
        """
        Compute agg/apply results for dict-like input.

        Parameters
        ----------
        op_name : {"agg", "apply"}
            Operation being performed.
        selected_obj : Series or DataFrame
            Data to perform operation on.
        selection : hashable or sequence of hashables
            Used by GroupBy, Window, and Resample if selection is applied to the object.
        kwargs : dict
            Keyword arguments to pass to the functions.

        Returns
        -------
        keys : list[hashable]
            Index labels for result.
        results : list
            Data for result. When aggregating with a Series, this can contain any
            Python object.
        """
        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )

        obj = self.obj
        is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
        func = cast(AggFuncTypeDict, self.func)
        func = self.normalize_dictlike_arg(op_name, selected_obj, func)

        is_non_unique_col = (
            selected_obj.ndim == 2
            and selected_obj.columns.nunique() < len(selected_obj.columns)
        )

        if selected_obj.ndim == 1:
            # key only used for output
            colg = obj._gotitem(selection, ndim=1)
            results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()]
            keys = list(func.keys())
        elif not is_groupby and is_non_unique_col:
            # key used for column selection and output
            # GH#51099
            results = []
            keys = []
            for key, how in func.items():
                indices = selected_obj.columns.get_indexer_for([key])
                labels = selected_obj.columns.take(indices)
                label_to_indices = defaultdict(list)
                for index, label in zip(indices, labels):
                    label_to_indices[label].append(index)

                key_data = [
                    getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs)
                    for label, indices in label_to_indices.items()
                    for indice in indices
                ]

                keys += [key] * len(key_data)
                results += key_data
        else:
            # key used for column selection and output
            results = [
                getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs)
                for key, how in func.items()
            ]
            keys = list(func.keys())

        return keys, results

    def wrap_results_dict_like(
        self,
        selected_obj: Series | DataFrame,
        result_index: list[Hashable],
        result_data: list,
    ):
        from pandas import Index
        from pandas.core.reshape.concat import concat

        obj = self.obj

        # Avoid making two isinstance calls in all and any below
        is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data]

        if all(is_ndframe):
            results = dict(zip(result_index, result_data))
            keys_to_use: Iterable[Hashable]
            keys_to_use = [k for k in result_index if not results[k].empty]
            # Have to check, if at least one DataFrame is not empty.
            keys_to_use = keys_to_use if keys_to_use != [] else result_index
            if selected_obj.ndim == 2:
                # keys are columns, so we can preserve names
                ktu = Index(keys_to_use)
                ktu._set_names(selected_obj.columns.names)
                keys_to_use = ktu

            axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1
            result = concat(
                {k: results[k] for k in keys_to_use},
                axis=axis,
                keys=keys_to_use,
            )
        elif any(is_ndframe):
            # There is a mix of NDFrames and scalars
            raise ValueError(
                "cannot perform both aggregation "
                "and transformation operations "
                "simultaneously"
            )
        else:
            from pandas import Series

            # we have a list of scalars
            # GH 36212 use name only if obj is a series
            if obj.ndim == 1:
                obj = cast("Series", obj)
                name = obj.name
            else:
                name = None

            result = Series(result_data, index=result_index, name=name)

        return result

    def apply_str(self) -> DataFrame | Series:
        """
        Compute apply in case of a string.

        Returns
        -------
        result: Series or DataFrame
        """
        # Caller is responsible for checking isinstance(self.f, str)
        func = cast(str, self.func)

        obj = self.obj

        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )

        # Support for `frame.transform('method')`
        # Some methods (shift, etc.) require the axis argument, others
        # don't, so inspect and insert if necessary.
        method = getattr(obj, func, None)
        if callable(method):
            sig = inspect.getfullargspec(method)
            arg_names = (*sig.args, *sig.kwonlyargs)
            if self.axis != 0 and (
                "axis" not in arg_names or func in ("corrwith", "skew")
            ):
                raise ValueError(f"Operation {func} does not support axis=1")
            if "axis" in arg_names:
                if isinstance(obj, (SeriesGroupBy, DataFrameGroupBy)):
                    # Try to avoid FutureWarning for deprecated axis keyword;
                    # If self.axis matches the axis we would get by not passing
                    # axis, we safely exclude the keyword.

                    default_axis = 0
                    if func in ["idxmax", "idxmin"]:
                        # DataFrameGroupBy.idxmax, idxmin axis defaults to self.axis,
                        # whereas other axis keywords default to 0
                        default_axis = self.obj.axis

                    if default_axis != self.axis:
                        self.kwargs["axis"] = self.axis
                else:
                    self.kwargs["axis"] = self.axis
        return self._apply_str(obj, func, *self.args, **self.kwargs)

    def apply_list_or_dict_like(self) -> DataFrame | Series:
        """
        Compute apply in case of a list-like or dict-like.

        Returns
        -------
        result: Series, DataFrame, or None
            Result when self.func is a list-like or dict-like, None otherwise.
        """

        if self.engine == "numba":
            raise NotImplementedError(
                "The 'numba' engine doesn't support list-like/"
                "dict likes of callables yet."
            )

        if self.axis == 1 and isinstance(self.obj, ABCDataFrame):
            return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T

        func = self.func
        kwargs = self.kwargs

        if is_dict_like(func):
            result = self.agg_or_apply_dict_like(op_name="apply")
        else:
            result = self.agg_or_apply_list_like(op_name="apply")

        result = reconstruct_and_relabel_result(result, func, **kwargs)

        return result

    def normalize_dictlike_arg(
        self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict
    ) -> AggFuncTypeDict:
        """
        Handler for dict-like argument.

        Ensures that necessary columns exist if obj is a DataFrame, and
        that a nested renamer is not passed. Also normalizes to all lists
        when values consist of a mix of list and non-lists.
        """
        assert how in ("apply", "agg", "transform")

        # Can't use func.values(); wouldn't work for a Series
        if (
            how == "agg"
            and isinstance(obj, ABCSeries)
            and any(is_list_like(v) for _, v in func.items())
        ) or (any(is_dict_like(v) for _, v in func.items())):
            # GH 15931 - deprecation of renaming keys
            raise SpecificationError("nested renamer is not supported")

        if obj.ndim != 1:
            # Check for missing columns on a frame
            from pandas import Index

            cols = Index(list(func.keys())).difference(obj.columns, sort=True)
            if len(cols) > 0:
                raise KeyError(f"Column(s) {list(cols)} do not exist")

        aggregator_types = (list, tuple, dict)

        # if we have a dict of any non-scalars
        # eg. {'A' : ['mean']}, normalize all to
        # be list-likes
        # Cannot use func.values() because arg may be a Series
        if any(isinstance(x, aggregator_types) for _, x in func.items()):
            new_func: AggFuncTypeDict = {}
            for k, v in func.items():
                if not isinstance(v, aggregator_types):
                    new_func[k] = [v]
                else:
                    new_func[k] = v
            func = new_func
        return func

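    # Illustrative sketch (editorial note, not part of the original module):
    # normalize_dictlike_arg lifts scalar entries to lists when any value is
    # already list-like, e.g. {"A": "mean", "B": ["min", "max"]} becomes
    # {"A": ["mean"], "B": ["min", "max"]}, while a nested dict such as
    # {"A": {"x": "mean"}} raises SpecificationError("nested renamer is not
    # supported").
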
    def _apply_str(self, obj, func: str, *args, **kwargs):
        """
        if arg is a string, then try to operate on it:
        - try to find a function (or attribute) on obj
        - try to find a numpy function
        - raise
        """
        assert isinstance(func, str)

        if hasattr(obj, func):
            f = getattr(obj, func)
            if callable(f):
                return f(*args, **kwargs)

            # people may aggregate on a non-callable attribute
            # but don't let them think they can pass args to it
            assert len(args) == 0
            assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
            return f
        elif hasattr(np, func) and hasattr(obj, "__array__"):
            # in particular exclude Window
            f = getattr(np, func)
            return f(obj, *args, **kwargs)
        else:
            msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object"
            raise AttributeError(msg)

class NDFrameApply(Apply):
    """
    Methods shared by FrameApply and SeriesApply but
    not GroupByApply or ResamplerWindowApply
    """

    obj: DataFrame | Series

    @property
    def index(self) -> Index:
        return self.obj.index

    @property
    def agg_axis(self) -> Index:
        return self.obj._get_agg_axis(self.axis)

    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        obj = self.obj
        kwargs = self.kwargs

        if op_name == "apply":
            if isinstance(self, FrameApply):
                by_row = self.by_row

            elif isinstance(self, SeriesApply):
                by_row = "_compat" if self.by_row else False
            else:
                by_row = False
            kwargs = {**kwargs, "by_row": by_row}

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        keys, results = self.compute_list_like(op_name, obj, kwargs)
        result = self.wrap_results_list_like(keys, results)
        return result

    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        assert op_name in ["agg", "apply"]
        obj = self.obj

        kwargs = {}
        if op_name == "apply":
            by_row = "_compat" if self.by_row else False
            kwargs.update({"by_row": by_row})

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        selection = None
        result_index, result_data = self.compute_dict_like(
            op_name, obj, selection, kwargs
        )
        result = self.wrap_results_dict_like(obj, result_index, result_data)
        return result

class FrameApply(NDFrameApply):
|
771 |
+
obj: DataFrame
|
772 |
+
|
773 |
+
def __init__(
|
774 |
+
self,
|
775 |
+
obj: AggObjType,
|
776 |
+
func: AggFuncType,
|
777 |
+
raw: bool,
|
778 |
+
result_type: str | None,
|
779 |
+
*,
|
780 |
+
by_row: Literal[False, "compat"] = False,
|
781 |
+
engine: str = "python",
|
782 |
+
engine_kwargs: dict[str, bool] | None = None,
|
783 |
+
args,
|
784 |
+
kwargs,
|
785 |
+
) -> None:
|
786 |
+
if by_row is not False and by_row != "compat":
|
787 |
+
raise ValueError(f"by_row={by_row} not allowed")
|
788 |
+
super().__init__(
|
789 |
+
obj,
|
790 |
+
func,
|
791 |
+
raw,
|
792 |
+
result_type,
|
793 |
+
by_row=by_row,
|
794 |
+
engine=engine,
|
795 |
+
engine_kwargs=engine_kwargs,
|
796 |
+
args=args,
|
797 |
+
kwargs=kwargs,
|
798 |
+
)
|
799 |
+
|
800 |
+
# ---------------------------------------------------------------
|
801 |
+
# Abstract Methods
|
802 |
+
|
803 |
+
@property
|
804 |
+
@abc.abstractmethod
|
805 |
+
def result_index(self) -> Index:
|
806 |
+
pass
|
807 |
+
|
808 |
+
@property
|
809 |
+
@abc.abstractmethod
|
810 |
+
def result_columns(self) -> Index:
|
811 |
+
pass
|
812 |
+
|
813 |
+
@property
|
814 |
+
@abc.abstractmethod
|
815 |
+
def series_generator(self) -> Generator[Series, None, None]:
|
816 |
+
pass
|
817 |
+
|
818 |
+
@staticmethod
|
819 |
+
@functools.cache
|
820 |
+
@abc.abstractmethod
|
821 |
+
def generate_numba_apply_func(
|
822 |
+
func, nogil=True, nopython=True, parallel=False
|
823 |
+
) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
|
824 |
+
pass
|
825 |
+
|
826 |
+
@abc.abstractmethod
|
827 |
+
def apply_with_numba(self):
|
828 |
+
pass
|
829 |
+
|
830 |
+
def validate_values_for_numba(self):
|
831 |
+
# Validate column dtyps all OK
|
832 |
+
for colname, dtype in self.obj.dtypes.items():
|
833 |
+
if not is_numeric_dtype(dtype):
|
834 |
+
raise ValueError(
|
835 |
+
f"Column {colname} must have a numeric dtype. "
|
836 |
+
f"Found '{dtype}' instead"
|
837 |
+
)
|
838 |
+
if is_extension_array_dtype(dtype):
|
839 |
+
raise ValueError(
|
840 |
+
f"Column {colname} is backed by an extension array, "
|
841 |
+
f"which is not supported by the numba engine."
|
842 |
+
)
|

    @abc.abstractmethod
    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        pass

    # ---------------------------------------------------------------

    @property
    def res_columns(self) -> Index:
        return self.result_columns

    @property
    def columns(self) -> Index:
        return self.obj.columns

    @cache_readonly
    def values(self):
        return self.obj.values

    def apply(self) -> DataFrame | Series:
        """compute the results"""

        # dispatch to handle list-like or dict-like
        if is_list_like(self.func):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support lists of callables yet"
                )
            return self.apply_list_or_dict_like()

        # all empty
        if len(self.columns) == 0 and len(self.index) == 0:
            return self.apply_empty_result()

        # string dispatch
        if isinstance(self.func, str):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support using "
                    "a string as the callable function"
                )
            return self.apply_str()

        # ufunc
        elif isinstance(self.func, np.ufunc):
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support "
                    "using a numpy ufunc as the callable function"
                )
            with np.errstate(all="ignore"):
                results = self.obj._mgr.apply("apply", func=self.func)
            # _constructor will retain self.index and self.columns
            return self.obj._constructor_from_mgr(results, axes=results.axes)

        # broadcasting
        if self.result_type == "broadcast":
            if self.engine == "numba":
                raise NotImplementedError(
                    "the 'numba' engine doesn't support result_type='broadcast'"
                )
            return self.apply_broadcast(self.obj)

        # one axis empty
        elif not all(self.obj.shape):
            return self.apply_empty_result()

        # raw
        elif self.raw:
            return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs)

        return self.apply_standard()
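
    # A brief sketch of the dispatch order above, using a hypothetical frame:
    # >>> df = pd.DataFrame({"a": [1, 2]})
    # >>> df.apply(["min", "max"])   # list-like  -> apply_list_or_dict_like()
    # >>> df.apply("sum")            # string     -> apply_str()
    # >>> df.apply(np.sqrt)          # ufunc      -> block-wise manager apply
    # >>> df.apply(lambda s: s, result_type="broadcast")  # -> apply_broadcast()
    # >>> df.apply(lambda s: s.sum(), raw=True)           # -> apply_raw()
    # Anything else falls through to apply_standard().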

    def agg(self):
        obj = self.obj
        axis = self.axis

        # TODO: Avoid having to change state
        self.obj = self.obj if self.axis == 0 else self.obj.T
        self.axis = 0

        result = None
        try:
            result = super().agg()
        finally:
            self.obj = obj
            self.axis = axis

        if axis == 1:
            result = result.T if result is not None else result

        if result is None:
            result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs)

        return result

    def apply_empty_result(self):
        """
        we have an empty result; at least 1 axis is 0

        we will try to apply the function to an empty
        series in order to see if this is a reduction function
        """
        assert callable(self.func)

        # we are not asked to reduce or infer reduction
        # so just return a copy of the existing object
        if self.result_type not in ["reduce", None]:
            return self.obj.copy()

        # we may need to infer
        should_reduce = self.result_type == "reduce"

        from pandas import Series

        if not should_reduce:
            try:
                if self.axis == 0:
                    r = self.func(
                        Series([], dtype=np.float64), *self.args, **self.kwargs
                    )
                else:
                    r = self.func(
                        Series(index=self.columns, dtype=np.float64),
                        *self.args,
                        **self.kwargs,
                    )
            except Exception:
                pass
            else:
                should_reduce = not isinstance(r, Series)

        if should_reduce:
            if len(self.agg_axis):
                r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs)
            else:
                r = np.nan

            return self.obj._constructor_sliced(r, index=self.agg_axis)
        else:
            return self.obj.copy()
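
    # Illustrative sketch (hypothetical, not part of the source): probing an
    # empty Series tells apply() whether the function reduces.
    # >>> empty = pd.DataFrame(columns=["a", "b"])
    # >>> empty.apply(lambda s: s.sum())   # scalar result -> reduces
    # a    0.0
    # b    0.0
    # dtype: float64
    # >>> empty.apply(lambda s: s * 2)     # Series result -> copy of the frame
    # Empty DataFrame
    # Columns: [a, b]
    # Index: []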

    def apply_raw(self, engine="python", engine_kwargs=None):
        """apply to the values as a numpy array"""

        def wrap_function(func):
            """
            Wrap user supplied function to work around numpy issue.

            see https://github.com/numpy/numpy/issues/8352
            """

            def wrapper(*args, **kwargs):
                result = func(*args, **kwargs)
                if isinstance(result, str):
                    result = np.array(result, dtype=object)
                return result

            return wrapper

        if engine == "numba":
            engine_kwargs = {} if engine_kwargs is None else engine_kwargs

            # error: Argument 1 to "__call__" of "_lru_cache_wrapper" has
            # incompatible type "Callable[..., Any] | str | list[Callable
            # [..., Any] | str] | dict[Hashable, Callable[..., Any] | str |
            # list[Callable[..., Any] | str]]"; expected "Hashable"
            nb_looper = generate_apply_looper(
                self.func, **engine_kwargs  # type: ignore[arg-type]
            )
            result = nb_looper(self.values, self.axis)
            # If we made the result 2-D, squeeze it back to 1-D
            result = np.squeeze(result)
        else:
            result = np.apply_along_axis(
                wrap_function(self.func),
                self.axis,
                self.values,
                *self.args,
                **self.kwargs,
            )

        # TODO: mixed type case
        if result.ndim == 2:
            return self.obj._constructor(result, index=self.index, columns=self.columns)
        else:
            return self.obj._constructor_sliced(result, index=self.agg_axis)
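
    # Illustrative sketch (hypothetical): with raw=True the function receives
    # bare ndarrays rather than Series, which is cheaper for numeric kernels.
    # >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # >>> df.apply(type, raw=True).iloc[0]
    # <class 'numpy.ndarray'>
    # >>> df.apply(np.mean, raw=True)   # reduction -> one value per column
    # a    1.5
    # b    3.5
    # dtype: float64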

    def apply_broadcast(self, target: DataFrame) -> DataFrame:
        assert callable(self.func)

        result_values = np.empty_like(target.values)

        # axis against which we check broadcast compliance
        result_compare = target.shape[0]

        for i, col in enumerate(target.columns):
            res = self.func(target[col], *self.args, **self.kwargs)
            ares = np.asarray(res).ndim

            # must be a scalar or 1d
            if ares > 1:
                raise ValueError("too many dims to broadcast")
            if ares == 1:
                # must match return dim
                if result_compare != len(res):
                    raise ValueError("cannot broadcast result")

            result_values[:, i] = res

        # we *always* preserve the original index / columns
        result = self.obj._constructor(
            result_values, index=target.index, columns=target.columns
        )
        return result
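
    # Illustrative sketch (hypothetical): result_type="broadcast" stretches a
    # scalar (or a length-matching array) back over the original shape.
    # >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # >>> df.apply(lambda s: s.sum(), result_type="broadcast")
    #    a  b
    # 0  3  7
    # 1  3  7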

    def apply_standard(self):
        if self.engine == "python":
            results, res_index = self.apply_series_generator()
        else:
            results, res_index = self.apply_series_numba()

        # wrap results
        return self.wrap_results(results, res_index)

    def apply_series_generator(self) -> tuple[ResType, Index]:
        assert callable(self.func)

        series_gen = self.series_generator
        res_index = self.result_index

        results = {}

        with option_context("mode.chained_assignment", None):
            for i, v in enumerate(series_gen):
                # ignore SettingWithCopy here in case the user mutates
                results[i] = self.func(v, *self.args, **self.kwargs)
                if isinstance(results[i], ABCSeries):
                    # If we have a view on v, we need to make a copy because
                    # series_generator will swap out the underlying data
                    results[i] = results[i].copy(deep=False)

        return results, res_index

    def apply_series_numba(self):
        if self.engine_kwargs.get("parallel", False):
            raise NotImplementedError(
                "Parallel apply is not supported when raw=False and engine='numba'"
            )
        if not self.obj.index.is_unique or not self.columns.is_unique:
            raise NotImplementedError(
                "The index/columns must be unique when raw=False and engine='numba'"
            )
        self.validate_values_for_numba()
        results = self.apply_with_numba()
        return results, self.result_index

    def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series:
        from pandas import Series

        # see if we can infer the results
        if len(results) > 0 and 0 in results and is_sequence(results[0]):
            return self.wrap_results_for_axis(results, res_index)

        # dict of scalars

        # the default dtype of an empty Series is `object`, but this
        # code can be hit by df.mean() where the result should have dtype
        # float64 even if it's an empty Series.
        constructor_sliced = self.obj._constructor_sliced
        if len(results) == 0 and constructor_sliced is Series:
            result = constructor_sliced(results, dtype=np.float64)
        else:
            result = constructor_sliced(results)
        result.index = res_index

        return result

    def apply_str(self) -> DataFrame | Series:
        # Caller is responsible for checking isinstance(self.func, str)
        # TODO: GH#39993 - Avoid special-casing by replacing with lambda
        if self.func == "size":
            # Special-cased because DataFrame.size returns a single scalar
            obj = self.obj
            value = obj.shape[self.axis]
            return obj._constructor_sliced(value, index=self.agg_axis)
        return super().apply_str()
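
    # Illustrative sketch (hypothetical): "size" is special-cased because
    # DataFrame.size is a single scalar, while apply("size") reports per column.
    # >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # >>> df.apply("size")
    # a    2
    # b    2
    # dtype: int64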


class FrameRowApply(FrameApply):
    axis: AxisInt = 0

    @property
    def series_generator(self) -> Generator[Series, None, None]:
        return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))

    @staticmethod
    @functools.cache
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        numba = import_optional_dependency("numba")
        from pandas import Series

        # Import helper from extensions to cast string object -> np strings
        # Note: This also has the side effect of loading our numba extensions
        from pandas.core._numba.extensions import maybe_cast_str

        jitted_udf = numba.extending.register_jitable(func)

        # Currently the parallel argument doesn't get passed through here
        # (it's disabled) since the dicts in numba aren't thread-safe.
        @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
        def numba_func(values, col_names, df_index):
            results = {}
            for j in range(values.shape[1]):
                # Create the series
                ser = Series(
                    values[:, j], index=df_index, name=maybe_cast_str(col_names[j])
                )
                results[j] = jitted_udf(ser)
            return results

        return numba_func

    def apply_with_numba(self) -> dict[int, Any]:
        nb_func = self.generate_numba_apply_func(
            cast(Callable, self.func), **self.engine_kwargs
        )
        from pandas.core._numba.extensions import set_numba_data

        index = self.obj.index
        if index.dtype == "string":
            index = index.astype(object)

        columns = self.obj.columns
        if columns.dtype == "string":
            columns = columns.astype(object)

        # Convert from numba dict to regular dict
        # Our isinstance checks in the df constructor don't pass for numba's typed dict
        with set_numba_data(index) as index, set_numba_data(columns) as columns:
            res = dict(nb_func(self.values, columns, index))
        return res

    @property
    def result_index(self) -> Index:
        return self.columns

    @property
    def result_columns(self) -> Index:
        return self.index

    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        """return the results for the rows"""

        if self.result_type == "reduce":
            # e.g. test_apply_dict GH#8735
            res = self.obj._constructor_sliced(results)
            res.index = res_index
            return res

        elif self.result_type is None and all(
            isinstance(x, dict) for x in results.values()
        ):
            # Our operation was a to_dict op e.g.
            # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
            res = self.obj._constructor_sliced(results)
            res.index = res_index
            return res

        try:
            result = self.obj._constructor(data=results)
        except ValueError as err:
            if "All arrays must be of the same length" in str(err):
                # e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
                # see test_agg_listlike_result GH#29587
                res = self.obj._constructor_sliced(results)
                res.index = res_index
                return res
            else:
                raise

        if not isinstance(results[0], ABCSeries):
            if len(result.index) == len(self.res_columns):
                result.index = self.res_columns

        if len(result.columns) == len(res_index):
            result.columns = res_index

        return result


class FrameColumnApply(FrameApply):
    axis: AxisInt = 1

    def apply_broadcast(self, target: DataFrame) -> DataFrame:
        result = super().apply_broadcast(target.T)
        return result.T

    @property
    def series_generator(self) -> Generator[Series, None, None]:
        values = self.values
        values = ensure_wrapped_if_datetimelike(values)
        assert len(values) > 0

        # We create one Series object, and will swap out the data inside
        # of it.  Kids: don't do this at home.
        ser = self.obj._ixs(0, axis=0)
        mgr = ser._mgr

        is_view = mgr.blocks[0].refs.has_reference()  # type: ignore[union-attr]

        if isinstance(ser.dtype, ExtensionDtype):
            # values will be incorrect for this block
            # TODO(EA2D): special case would be unnecessary with 2D EAs
            obj = self.obj
            for i in range(len(obj)):
                yield obj._ixs(i, axis=0)

        else:
            for arr, name in zip(values, self.index):
                # GH#35462 re-pin mgr in case setitem changed it
                ser._mgr = mgr
                mgr.set_values(arr)
                object.__setattr__(ser, "_name", name)
                if not is_view:
                    # In apply_series_generator we store a shallow copy of the
                    # result, which potentially increases the ref count of this
                    # reused `ser` object (depending on the result of the applied
                    # function) -> if that happened and `ser` is already a copy,
                    # then we reset the refs here to avoid triggering an
                    # unnecessary CoW inside the applied function
                    # (https://github.com/pandas-dev/pandas/pull/56212)
                    mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0])  # type: ignore[union-attr]
                yield ser

    @staticmethod
    @functools.cache
    def generate_numba_apply_func(
        func, nogil=True, nopython=True, parallel=False
    ) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]:
        numba = import_optional_dependency("numba")
        from pandas import Series
        from pandas.core._numba.extensions import maybe_cast_str

        jitted_udf = numba.extending.register_jitable(func)

        @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel)
        def numba_func(values, col_names_index, index):
            results = {}
            # Currently the parallel argument doesn't get passed through here
            # (it's disabled) since the dicts in numba aren't thread-safe.
            for i in range(values.shape[0]):
                # Create the series
                # TODO: values corrupted without the copy
                ser = Series(
                    values[i].copy(),
                    index=col_names_index,
                    name=maybe_cast_str(index[i]),
                )
                results[i] = jitted_udf(ser)

            return results

        return numba_func

    def apply_with_numba(self) -> dict[int, Any]:
        nb_func = self.generate_numba_apply_func(
            cast(Callable, self.func), **self.engine_kwargs
        )

        from pandas.core._numba.extensions import set_numba_data

        # Convert from numba dict to regular dict
        # Our isinstance checks in the df constructor don't pass for numba's typed dict
        with set_numba_data(self.obj.index) as index, set_numba_data(
            self.columns
        ) as columns:
            res = dict(nb_func(self.values, columns, index))

        return res

    @property
    def result_index(self) -> Index:
        return self.index

    @property
    def result_columns(self) -> Index:
        return self.columns

    def wrap_results_for_axis(
        self, results: ResType, res_index: Index
    ) -> DataFrame | Series:
        """return the results for the columns"""
        result: DataFrame | Series

        # we have requested to expand
        if self.result_type == "expand":
            result = self.infer_to_same_shape(results, res_index)

        # we have a non-series and don't want inference
        elif not isinstance(results[0], ABCSeries):
            result = self.obj._constructor_sliced(results)
            result.index = res_index

        # we may want to infer results
        else:
            result = self.infer_to_same_shape(results, res_index)

        return result

    def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
        """infer the results to the same shape as the input object"""
        result = self.obj._constructor(data=results)
        result = result.T

        # set the index
        result.index = res_index

        # infer dtypes
        result = result.infer_objects(copy=False)

        return result


class SeriesApply(NDFrameApply):
    obj: Series
    axis: AxisInt = 0
    by_row: Literal[False, "compat", "_compat"]  # only relevant for apply()

    def __init__(
        self,
        obj: Series,
        func: AggFuncType,
        *,
        convert_dtype: bool | lib.NoDefault = lib.no_default,
        by_row: Literal[False, "compat", "_compat"] = "compat",
        args,
        kwargs,
    ) -> None:
        if convert_dtype is lib.no_default:
            convert_dtype = True
        else:
            warnings.warn(
                "the convert_dtype parameter is deprecated and will be removed in a "
                "future version. Do ``ser.astype(object).apply()`` "
                "instead if you want ``convert_dtype=False``.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        self.convert_dtype = convert_dtype

        super().__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            by_row=by_row,
            args=args,
            kwargs=kwargs,
        )

    def apply(self) -> DataFrame | Series:
        obj = self.obj

        if len(obj) == 0:
            return self.apply_empty_result()

        # dispatch to handle list-like or dict-like
        if is_list_like(self.func):
            return self.apply_list_or_dict_like()

        if isinstance(self.func, str):
            # if we are a string, try to dispatch
            return self.apply_str()

        if self.by_row == "_compat":
            return self.apply_compat()

        # self.func is Callable
        return self.apply_standard()

    def agg(self):
        result = super().agg()
        if result is None:
            obj = self.obj
            func = self.func
            # string, list-like, and dict-like are entirely handled in super
            assert callable(func)

            # GH53325: The setup below is just to keep current behavior while
            # emitting a deprecation message. In the future this will all be
            # replaced with a simple `result = f(self.obj, *self.args, **self.kwargs)`.
            try:
                result = obj.apply(func, args=self.args, **self.kwargs)
            except (ValueError, AttributeError, TypeError):
                result = func(obj, *self.args, **self.kwargs)
            else:
                msg = (
                    f"using {func} in {type(obj).__name__}.agg cannot aggregate and "
                    f"has been deprecated. Use {type(obj).__name__}.transform to "
                    f"keep behavior unchanged."
                )
                warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())

        return result

    def apply_empty_result(self) -> Series:
        obj = self.obj
        return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
            obj, method="apply"
        )

    def apply_compat(self):
        """compat apply method for funcs in listlikes and dictlikes.

        Used for each callable when giving listlikes and dictlikes of callables to
        apply. Needed for compatibility with Pandas < v2.1.

        .. versionadded:: 2.1.0
        """
        obj = self.obj
        func = self.func

        if callable(func):
            f = com.get_cython_func(func)
            if f and not self.args and not self.kwargs:
                return obj.apply(func, by_row=False)

        try:
            result = obj.apply(func, by_row="compat")
        except (ValueError, AttributeError, TypeError):
            result = obj.apply(func, by_row=False)
        return result

    def apply_standard(self) -> DataFrame | Series:
        # caller is responsible for ensuring that f is Callable
        func = cast(Callable, self.func)
        obj = self.obj

        if isinstance(func, np.ufunc):
            with np.errstate(all="ignore"):
                return func(obj, *self.args, **self.kwargs)
        elif not self.by_row:
            return func(obj, *self.args, **self.kwargs)

        if self.args or self.kwargs:
            # _map_values does not support args/kwargs
            def curried(x):
                return func(x, *self.args, **self.kwargs)

        else:
            curried = func

        # row-wise access
        # apply doesn't have a `na_action` keyword and for backward compat reasons
        # we need to give `na_action="ignore"` for categorical data.
        # TODO: remove the `na_action="ignore"` when that default has been changed in
        # Categorical (GH51645).
        action = "ignore" if isinstance(obj.dtype, CategoricalDtype) else None
        mapped = obj._map_values(
            mapper=curried, na_action=action, convert=self.convert_dtype
        )

        if len(mapped) and isinstance(mapped[0], ABCSeries):
            # GH#43986 Need to do list(mapped) in order to get treated as nested
            # See also GH#25959 regarding EA support
            return obj._constructor_expanddim(list(mapped), index=obj.index)
        else:
            return obj._constructor(mapped, index=obj.index).__finalize__(
                obj, method="apply"
            )
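
    # Illustrative sketch (hypothetical): extra positional/keyword arguments
    # are curried into the elementwise function before mapping.
    # >>> s = pd.Series([1, 2, 3])
    # >>> s.apply(lambda x, k: x + k, args=(10,))
    # 0    11
    # 1    12
    # 2    13
    # dtype: int64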


class GroupByApply(Apply):
    obj: GroupBy | Resampler | BaseWindow

    def __init__(
        self,
        obj: GroupBy[NDFrameT],
        func: AggFuncType,
        *,
        args,
        kwargs,
    ) -> None:
        kwargs = kwargs.copy()
        self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0))
        super().__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            args=args,
            kwargs=kwargs,
        )

    def apply(self):
        raise NotImplementedError

    def transform(self):
        raise NotImplementedError

    def agg_or_apply_list_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        obj = self.obj
        kwargs = self.kwargs
        if op_name == "apply":
            kwargs = {**kwargs, "by_row": False}

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        if obj._selected_obj.ndim == 1:
            # For SeriesGroupBy this matches _obj_with_exclusions
            selected_obj = obj._selected_obj
        else:
            selected_obj = obj._obj_with_exclusions

        # Only set as_index=True on groupby objects, not on the Window or
        # Resampler objects that inherit from this class.
        with com.temp_setattr(
            obj, "as_index", True, condition=hasattr(obj, "as_index")
        ):
            keys, results = self.compute_list_like(op_name, selected_obj, kwargs)
        result = self.wrap_results_list_like(keys, results)
        return result
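
    # Illustrative sketch (hypothetical): a list-like spec fans out over the
    # selected data, one result block per function.
    # >>> df = pd.DataFrame({"g": ["x", "x", "y"], "v": [1, 2, 3]})
    # >>> df.groupby("g")["v"].agg(["min", "max"])  # one column per function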

    def agg_or_apply_dict_like(
        self, op_name: Literal["agg", "apply"]
    ) -> DataFrame | Series:
        from pandas.core.groupby.generic import (
            DataFrameGroupBy,
            SeriesGroupBy,
        )

        assert op_name in ["agg", "apply"]

        obj = self.obj
        kwargs = {}
        if op_name == "apply":
            by_row = "_compat" if self.by_row else False
            kwargs.update({"by_row": by_row})

        if getattr(obj, "axis", 0) == 1:
            raise NotImplementedError("axis other than 0 is not supported")

        selected_obj = obj._selected_obj
        selection = obj._selection

        is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))

        # Numba GroupBy engine/engine-kwargs passthrough
        if is_groupby:
            engine = self.kwargs.get("engine", None)
            engine_kwargs = self.kwargs.get("engine_kwargs", None)
            kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs})

        with com.temp_setattr(
            obj, "as_index", True, condition=hasattr(obj, "as_index")
        ):
            result_index, result_data = self.compute_dict_like(
                op_name, selected_obj, selection, kwargs
            )
        result = self.wrap_results_dict_like(selected_obj, result_index, result_data)
        return result


class ResamplerWindowApply(GroupByApply):
    axis: AxisInt = 0
    obj: Resampler | BaseWindow

    def __init__(
        self,
        obj: Resampler | BaseWindow,
        func: AggFuncType,
        *,
        args,
        kwargs,
    ) -> None:
        super(GroupByApply, self).__init__(
            obj,
            func,
            raw=False,
            result_type=None,
            args=args,
            kwargs=kwargs,
        )

    def apply(self):
        raise NotImplementedError

    def transform(self):
        raise NotImplementedError


def reconstruct_func(
    func: AggFuncType | None, **kwargs
) -> tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]:
    """
    This is the internal function to reconstruct func given whether there is
    relabeling or not, and also to normalize the keywords to get the new order
    of columns.

    If named aggregation is applied, `func` will be None, and kwargs contains the
    column and aggregation function information to be parsed;
    if named aggregation is not applied, `func` is either a string (e.g. 'min'), a
    Callable, a list of them (e.g. ['min', np.max]), or a dictionary of column names
    and str/Callable/lists of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]}).

    If relabeling is True, will return relabeling, reconstructed func, column
    names, and the reconstructed order of columns.
    If relabeling is False, the columns and order will be None.

    Parameters
    ----------
    func: agg function (e.g. 'min' or Callable) or list of agg functions
        (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
    **kwargs: dict, kwargs used in is_multi_agg_with_relabel and
        normalize_keyword_aggregation function for relabelling

    Returns
    -------
    relabelling: bool, if there is relabelling or not
    func: normalized and mangled func
    columns: tuple of column names
    order: array of column indices

    Examples
    --------
    >>> reconstruct_func(None, **{"foo": ("col", "min")})
    (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))

    >>> reconstruct_func("min")
    (False, 'min', None, None)
    """
    relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
    columns: tuple[str, ...] | None = None
    order: npt.NDArray[np.intp] | None = None

    if not relabeling:
        if isinstance(func, list) and len(func) > len(set(func)):
            # GH 28426 will raise error if duplicated function names are used and
            # there is no reassigned name
            raise SpecificationError(
                "Function names must be unique if there are no new column names "
                "assigned"
            )
        if func is None:
            # nicer error message
            raise TypeError("Must provide 'func' or tuples of '(column, aggfunc)'.")

    if relabeling:
        # error: Incompatible types in assignment (expression has type
        # "MutableMapping[Hashable, list[Callable[..., Any] | str]]", variable has type
        # "Callable[..., Any] | str | list[Callable[..., Any] | str] |
        # MutableMapping[Hashable, Callable[..., Any] | str | list[Callable[..., Any] |
        # str]] | None")
        func, columns, order = normalize_keyword_aggregation(  # type: ignore[assignment]
            kwargs
        )
    assert func is not None

    return relabeling, func, columns, order


def is_multi_agg_with_relabel(**kwargs) -> bool:
    """
    Check whether kwargs passed to .agg look like multi-agg with relabeling.

    Parameters
    ----------
    **kwargs : dict

    Returns
    -------
    bool

    Examples
    --------
    >>> is_multi_agg_with_relabel(a="max")
    False
    >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
    True
    >>> is_multi_agg_with_relabel()
    False
    """
    return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
        len(kwargs) > 0
    )


def normalize_keyword_aggregation(
    kwargs: dict,
) -> tuple[
    MutableMapping[Hashable, list[AggFuncTypeBase]],
    tuple[str, ...],
    npt.NDArray[np.intp],
]:
    """
    Normalize user-provided "named aggregation" kwargs.
    Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
    to the old Dict[str, List[scalar]].

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    aggspec : dict
        The transformed kwargs.
    columns : tuple[str, ...]
        The user-provided keys.
    col_idx_order : List[int]
        List of column indices.

    Examples
    --------
    >>> normalize_keyword_aggregation({"output": ("input", "sum")})
    (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
    """
    from pandas.core.indexes.base import Index

    # Normalize the aggregation functions as Mapping[column, List[func]],
    # process normally, then fix up the names.
    # TODO: aggspec type: typing.Dict[str, List[AggScalar]]
    aggspec = defaultdict(list)
    order = []
    columns, pairs = list(zip(*kwargs.items()))

    for column, aggfunc in pairs:
        aggspec[column].append(aggfunc)
        order.append((column, com.get_callable_name(aggfunc) or aggfunc))

    # uniquify aggfunc name if duplicated in order list
    uniquified_order = _make_unique_kwarg_list(order)

    # GH 25719, because aggspec will change the order of assigned columns in
    # aggregation, uniquified_aggspec stores the uniquified order list and is
    # compared with order based on index
    aggspec_order = [
        (column, com.get_callable_name(aggfunc) or aggfunc)
        for column, aggfuncs in aggspec.items()
        for aggfunc in aggfuncs
    ]
    uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)

    # get the new index of columns by comparison
    col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
    return aggspec, columns, col_idx_order


def _make_unique_kwarg_list(
    seq: Sequence[tuple[Any, Any]]
) -> Sequence[tuple[Any, Any]]:
    """
    Uniquify aggfunc name of the pairs in the order list

    Examples
    --------
    >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
    >>> _make_unique_kwarg_list(kwarg_list)
    [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
    """
    return [
        (pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair
        for i, pair in enumerate(seq)
    ]


def relabel_result(
    result: DataFrame | Series,
    func: dict[str, list[Callable | str]],
    columns: Iterable[Hashable],
    order: Iterable[int],
) -> dict[Hashable, Series]:
    """
    Internal function to reorder result if relabelling is True for
    dataframe.agg, and return the reordered result in dict.

    Parameters
    ----------
    result: Result from aggregation
    func: Dict of (column name, funcs)
    columns: New column names for relabelling
    order: New order for relabelling

    Examples
    --------
    >>> from pandas.core.apply import relabel_result
    >>> result = pd.DataFrame(
    ...     {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
    ...     index=["max", "mean", "min"]
    ... )
    >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
    >>> columns = ("foo", "aab", "bar", "dat")
    >>> order = [0, 1, 2, 3]
    >>> result_in_dict = relabel_result(result, funcs, columns, order)
    >>> pd.DataFrame(result_in_dict, index=columns)
           A    C    B
    foo  2.0  NaN  NaN
    aab  NaN  6.0  NaN
    bar  NaN  NaN  4.0
    dat  NaN  NaN  2.5
    """
    from pandas.core.indexes.base import Index

    reordered_indexes = [
        pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
    ]
    reordered_result_in_dict: dict[Hashable, Series] = {}
    idx = 0

    reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
    for col, fun in func.items():
        s = result[col].dropna()

        # In `_aggregate`, the callable names are obtained and used in `result`,
        # and these names are ordered alphabetically, e.g.
        #           C2   C1
        # <lambda>   1  NaN
        # amax     NaN  4.0
        # max      NaN  4.0
        # sum     18.0  6.0
        # Therefore, the order of functions for each column could be shuffled
        # accordingly, so we need to get the callable name if it is not one of
        # the parsed names, and reorder the aggregated result for each column.
        # e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), the correct
        # order is [sum, <lambda>], but in `result` it will be [<lambda>, sum],
        # and we need to reorder so that aggregated values map to their functions
        # in the right order.

        # However, if there is only one column being used for aggregation, there
        # is no need to reorder since the index is not sorted, and it is kept
        # as-is in `funcs`, e.g.
        #         A
        # min   1.0
        # mean  1.5
        # mean  1.5
        if reorder_mask:
            fun = [
                com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
            ]
            col_idx_order = Index(s.index).get_indexer(fun)
            s = s.iloc[col_idx_order]

        # assign the new user-provided "named aggregation" as index names, and
        # reindex it based on the full set of user-provided names.
        s.index = reordered_indexes[idx : idx + len(fun)]
        reordered_result_in_dict[col] = s.reindex(columns, copy=False)
        idx = idx + len(fun)
    return reordered_result_in_dict


def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series:
    from pandas import DataFrame

    relabeling, func, columns, order = reconstruct_func(func, **kwargs)

    if relabeling:
        # This is to keep the order of columns occurrence unchanged, and also
        # keep the order of new columns occurrence unchanged

        # For the return values of reconstruct_func, if relabeling is
        # False, columns and order will be None.
        assert columns is not None
        assert order is not None

        result_in_dict = relabel_result(result, func, columns, order)
        result = DataFrame(result_in_dict, index=columns)

    return result


# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:


def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
    """
    Possibly mangle a list of aggfuncs.

    Parameters
    ----------
    aggfuncs : Sequence

    Returns
    -------
    mangled: list-like
        A new AggSpec sequence, where lambdas have been converted
        to have unique names.

    Notes
    -----
    If just one aggfunc is passed, the name will not be mangled.
    """
    if len(aggfuncs) <= 1:
        # don't mangle for .agg([lambda x: .])
        return aggfuncs
    i = 0
    mangled_aggfuncs = []
    for aggfunc in aggfuncs:
        if com.get_callable_name(aggfunc) == "<lambda>":
            aggfunc = partial(aggfunc)
            aggfunc.__name__ = f"<lambda_{i}>"
            i += 1
        mangled_aggfuncs.append(aggfunc)

    return mangled_aggfuncs


def maybe_mangle_lambdas(agg_spec: Any) -> Any:
    """
    Make new lambdas with unique names.

    Parameters
    ----------
    agg_spec : Any
        An argument to GroupBy.agg.
        Non-dict-like `agg_spec` are passed through as-is.
        For dict-like `agg_spec` a new spec is returned
        with name-mangled lambdas.

    Returns
    -------
    mangled : Any
        Same type as the input.

    Examples
    --------
    >>> maybe_mangle_lambdas('sum')
    'sum'
    >>> maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
    [<function __main__.<lambda_0>,
     <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
    """
    is_dict = is_dict_like(agg_spec)
    if not (is_dict or is_list_like(agg_spec)):
        return agg_spec
    mangled_aggspec = type(agg_spec)()  # dict or OrderedDict

    if is_dict:
        for key, aggfuncs in agg_spec.items():
            if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
                mangled_aggfuncs = _managle_lambda_list(aggfuncs)
            else:
                mangled_aggfuncs = aggfuncs

            mangled_aggspec[key] = mangled_aggfuncs
    else:
        mangled_aggspec = _managle_lambda_list(agg_spec)

    return mangled_aggspec


def validate_func_kwargs(
    kwargs: dict,
) -> tuple[list[str], list[str | Callable[..., Any]]]:
    """
    Validates types of user-provided "named aggregation" kwargs.
    `TypeError` is raised if aggfunc is not `str` or callable.

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    columns : List[str]
        List of user-provided keys.
    func : List[Union[str, callable[..., Any]]]
        List of user-provided aggfuncs

    Examples
    --------
    >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
    (['one', 'two'], ['min', 'max'])
    """
    tuple_given_message = "func is expected but received {} in **kwargs."
    columns = list(kwargs)
    func = []
    for col_func in kwargs.values():
        if not (isinstance(col_func, str) or callable(col_func)):
            raise TypeError(tuple_given_message.format(type(col_func).__name__))
        func.append(col_func)
    if not columns:
        no_arg_message = "Must provide 'func' or named aggregation **kwargs."
        raise TypeError(no_arg_message)
    return columns, func


def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool:
    return isinstance(colg, ABCDataFrame) or (
        isinstance(colg, ABCSeries) and op_name == "agg"
    )


def warn_alias_replacement(
    obj: AggObjType,
    func: Callable,
    alias: str,
) -> None:
    if alias.startswith("np."):
        full_alias = alias
    else:
        full_alias = f"{type(obj).__name__}.{alias}"
        alias = f'"{alias}"'
    warnings.warn(
        f"The provided callable {func} is currently using "
        f"{full_alias}. In a future version of pandas, "
        f"the provided callable will be used directly. To keep current "
        f"behavior pass the string {alias} instead.",
        category=FutureWarning,
        stacklevel=find_stack_level(),
    )
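
# Illustrative sketch (hypothetical): the warning above fires when a callable
# is silently replaced by a pandas alias, e.g.
# >>> df = pd.DataFrame({"a": [1, 2]})
# >>> df.agg(np.sum)  # doctest: +SKIP
# FutureWarning: The provided callable <function sum ...> is currently using
# DataFrame.sum. ... To keep current behavior pass the string "sum" instead.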
env-llmeval/lib/python3.10/site-packages/pandas/core/arraylike.py
ADDED
@@ -0,0 +1,530 @@
"""
Methods that can be shared by many array-like classes or subclasses:
    Series
    Index
    ExtensionArray
"""
from __future__ import annotations

import operator
from typing import Any

import numpy as np

from pandas._libs import lib
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op

from pandas.core.dtypes.generic import ABCNDFrame

from pandas.core import roperator
from pandas.core.construction import extract_array
from pandas.core.ops.common import unpack_zerodim_and_defer

REDUCTION_ALIASES = {
    "maximum": "max",
    "minimum": "min",
    "add": "sum",
    "multiply": "prod",
}


class OpsMixin:
    # -------------------------------------------------------------
    # Comparisons

    def _cmp_method(self, other, op):
        return NotImplemented

    @unpack_zerodim_and_defer("__eq__")
    def __eq__(self, other):
        return self._cmp_method(other, operator.eq)

    @unpack_zerodim_and_defer("__ne__")
    def __ne__(self, other):
        return self._cmp_method(other, operator.ne)

    @unpack_zerodim_and_defer("__lt__")
    def __lt__(self, other):
        return self._cmp_method(other, operator.lt)

    @unpack_zerodim_and_defer("__le__")
    def __le__(self, other):
        return self._cmp_method(other, operator.le)

    @unpack_zerodim_and_defer("__gt__")
    def __gt__(self, other):
        return self._cmp_method(other, operator.gt)

    @unpack_zerodim_and_defer("__ge__")
    def __ge__(self, other):
        return self._cmp_method(other, operator.ge)

    # -------------------------------------------------------------
    # Logical Methods

    def _logical_method(self, other, op):
        return NotImplemented

    @unpack_zerodim_and_defer("__and__")
    def __and__(self, other):
        return self._logical_method(other, operator.and_)

    @unpack_zerodim_and_defer("__rand__")
    def __rand__(self, other):
        return self._logical_method(other, roperator.rand_)

    @unpack_zerodim_and_defer("__or__")
    def __or__(self, other):
        return self._logical_method(other, operator.or_)

    @unpack_zerodim_and_defer("__ror__")
    def __ror__(self, other):
        return self._logical_method(other, roperator.ror_)

    @unpack_zerodim_and_defer("__xor__")
    def __xor__(self, other):
        return self._logical_method(other, operator.xor)

    @unpack_zerodim_and_defer("__rxor__")
    def __rxor__(self, other):
        return self._logical_method(other, roperator.rxor)

    # -------------------------------------------------------------
    # Arithmetic Methods

    def _arith_method(self, other, op):
        return NotImplemented

    @unpack_zerodim_and_defer("__add__")
    def __add__(self, other):
        """
        Get Addition of DataFrame and other, column-wise.

        Equivalent to ``DataFrame.add(other)``.

        Parameters
        ----------
        other : scalar, sequence, Series, dict or DataFrame
            Object to be added to the DataFrame.

        Returns
        -------
        DataFrame
            The result of adding ``other`` to DataFrame.

        See Also
        --------
        DataFrame.add : Add a DataFrame and another object, with option for index-
            or column-oriented addition.

        Examples
        --------
        >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
        ...                   index=['elk', 'moose'])
        >>> df
               height  weight
        elk       1.5     500
        moose     2.6     800

        Adding a scalar affects all rows and columns.

        >>> df[['height', 'weight']] + 1.5
               height  weight
        elk       3.0   501.5
        moose     4.1   801.5

        Each element of a list is added to a column of the DataFrame, in order.

        >>> df[['height', 'weight']] + [0.5, 1.5]
               height  weight
        elk       2.0   501.5
        moose     3.1   801.5

        Keys of a dictionary are aligned to the DataFrame, based on column names;
        each value in the dictionary is added to the corresponding column.

        >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5}
               height  weight
        elk       2.0   501.5
        moose     3.1   801.5

        When `other` is a :class:`Series`, the index of `other` is aligned with the
        columns of the DataFrame.

        >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height'])
        >>> df[['height', 'weight']] + s1
               height  weight
        elk       3.0   500.5
        moose     4.1   800.5

        Even when the index of `other` is the same as the index of the DataFrame,
        the :class:`Series` will not be reoriented. If index-wise alignment is desired,
        :meth:`DataFrame.add` should be used with `axis='index'`.

        >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose'])
        >>> df[['height', 'weight']] + s2
               elk  height  moose  weight
        elk    NaN     NaN    NaN     NaN
        moose  NaN     NaN    NaN     NaN

        >>> df[['height', 'weight']].add(s2, axis='index')
               height  weight
        elk       2.0   500.5
        moose     4.1   801.5

        When `other` is a :class:`DataFrame`, both columns names and the
        index are aligned.

        >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]},
        ...                      index=['elk', 'moose', 'deer'])
        >>> df[['height', 'weight']] + other
               height  weight
        deer      NaN     NaN
        elk       1.7     NaN
        moose     3.0     NaN
        """
        return self._arith_method(other, operator.add)

    @unpack_zerodim_and_defer("__radd__")
    def __radd__(self, other):
        return self._arith_method(other, roperator.radd)

    @unpack_zerodim_and_defer("__sub__")
    def __sub__(self, other):
        return self._arith_method(other, operator.sub)

    @unpack_zerodim_and_defer("__rsub__")
    def __rsub__(self, other):
        return self._arith_method(other, roperator.rsub)

    @unpack_zerodim_and_defer("__mul__")
    def __mul__(self, other):
        return self._arith_method(other, operator.mul)

    @unpack_zerodim_and_defer("__rmul__")
    def __rmul__(self, other):
        return self._arith_method(other, roperator.rmul)

    @unpack_zerodim_and_defer("__truediv__")
    def __truediv__(self, other):
        return self._arith_method(other, operator.truediv)

    @unpack_zerodim_and_defer("__rtruediv__")
    def __rtruediv__(self, other):
        return self._arith_method(other, roperator.rtruediv)

    @unpack_zerodim_and_defer("__floordiv__")
    def __floordiv__(self, other):
        return self._arith_method(other, operator.floordiv)

    @unpack_zerodim_and_defer("__rfloordiv__")
    def __rfloordiv__(self, other):
        return self._arith_method(other, roperator.rfloordiv)

    @unpack_zerodim_and_defer("__mod__")
    def __mod__(self, other):
        return self._arith_method(other, operator.mod)

    @unpack_zerodim_and_defer("__rmod__")
    def __rmod__(self, other):
        return self._arith_method(other, roperator.rmod)

    @unpack_zerodim_and_defer("__divmod__")
    def __divmod__(self, other):
        return self._arith_method(other, divmod)

    @unpack_zerodim_and_defer("__rdivmod__")
    def __rdivmod__(self, other):
        return self._arith_method(other, roperator.rdivmod)

    @unpack_zerodim_and_defer("__pow__")
    def __pow__(self, other):
        return self._arith_method(other, operator.pow)

    @unpack_zerodim_and_defer("__rpow__")
    def __rpow__(self, other):
        return self._arith_method(other, roperator.rpow)
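
# Illustrative sketch (hypothetical, not part of the source): a minimal
# OpsMixin subclass only has to implement _arith_method / _cmp_method; the
# dunder methods above route every operator through those hooks.
# >>> class Wrapped(OpsMixin):
# ...     def __init__(self, x):
# ...         self.x = x
# ...     def _arith_method(self, other, op):
# ...         return Wrapped(op(self.x, getattr(other, "x", other)))
# >>> (Wrapped(2) + 3).x   # dispatched via _arith_method(3, operator.add)
# 5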


# -----------------------------------------------------------------------------
# Helpers to implement __array_ufunc__


def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
    """
    Compatibility with numpy ufuncs.

    See also
    --------
    numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
    """
    from pandas.core.frame import (
        DataFrame,
        Series,
    )
    from pandas.core.generic import NDFrame
    from pandas.core.internals import (
        ArrayManager,
        BlockManager,
    )

    cls = type(self)

    kwargs = _standardize_out_kwarg(**kwargs)

    # for binary ops, use our custom dunder methods
    result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs)
    if result is not NotImplemented:
        return result

    # Determine if we should defer.
    no_defer = (
        np.ndarray.__array_ufunc__,
        cls.__array_ufunc__,
    )

    for item in inputs:
        higher_priority = (
            hasattr(item, "__array_priority__")
            and item.__array_priority__ > self.__array_priority__
        )
        has_array_ufunc = (
            hasattr(item, "__array_ufunc__")
            and type(item).__array_ufunc__ not in no_defer
            and not isinstance(item, self._HANDLED_TYPES)
        )
        if higher_priority or has_array_ufunc:
            return NotImplemented

    # align all the inputs.
    types = tuple(type(x) for x in inputs)
    alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)]

    if len(alignable) > 1:
        # This triggers alignment.
        # At the moment, there aren't any ufuncs with more than two inputs
        # so this ends up just being x1.index | x2.index, but we write
        # it to handle *args.
        set_types = set(types)
        if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types):
            # We currently don't handle ufunc(DataFrame, Series)
            # well. Previously this raised an internal ValueError. We might
            # support it someday, so raise a NotImplementedError.
            raise NotImplementedError(
                f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs."
            )
        axes = self.axes
        for obj in alignable[1:]:
            # this relies on the fact that we aren't handling mixed
            # series / frame ufuncs.
            for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)):
                axes[i] = ax1.union(ax2)

        reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes))
        inputs = tuple(
            x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x
            for x, t in zip(inputs, types)
        )
    else:
        reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes))

    if self.ndim == 1:
        names = [getattr(x, "name") for x in inputs if hasattr(x, "name")]
        name = names[0] if len(set(names)) == 1 else None
        reconstruct_kwargs = {"name": name}
    else:
        reconstruct_kwargs = {}

    def reconstruct(result):
        if ufunc.nout > 1:
            # np.modf, np.frexp, np.divmod
            return tuple(_reconstruct(x) for x in result)

        return _reconstruct(result)

    def _reconstruct(result):
        if lib.is_scalar(result):
            return result

        if result.ndim != self.ndim:
            if method == "outer":
                raise NotImplementedError
            return result
        if isinstance(result, (BlockManager, ArrayManager)):
            # we went through BlockManager.apply e.g. np.sqrt
            result = self._constructor_from_mgr(result, axes=result.axes)
        else:
            # we converted an array, lost our axes
            result = self._constructor(
                result, **reconstruct_axes, **reconstruct_kwargs, copy=False
            )
        # TODO: When we support multiple values in __finalize__, this
        # should pass alignable to `__finalize__` instead of self.
        # Then `np.add(a, b)` would consider attrs from both a and b
        # when a and b are NDFrames.
        if len(alignable) == 1:
            result = result.__finalize__(self)
        return result

    if "out" in kwargs:
        # e.g. test_multiindex_get_loc
        result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs)
        return reconstruct(result)

    if method == "reduce":
        # e.g. test.series.test_ufunc.test_reduce
        result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs)
        if result is not NotImplemented:
            return result

    # We still get here with kwargs `axis` for e.g. np.maximum.accumulate
    # and `dtype` and `keepdims` for np.ptp

    if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1):
        # Just give up on preserving types in the complex case.
        # In theory we could preserve some of them:
        # * nout>1 is doable if BlockManager.apply took nout and
        #   returned a Tuple[BlockManager].
        # * len(inputs) > 1 is doable when we know that we have
        #   aligned blocks / dtypes.

        # e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add
        inputs = tuple(np.asarray(x) for x in inputs)
        # Note: we can't use default_array_ufunc here bc reindexing means
        # that `self` may not be among `inputs`
        result = getattr(ufunc, method)(*inputs, **kwargs)
    elif self.ndim == 1:
        # ufunc(series, ...)
|
398 |
+
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
|
399 |
+
result = getattr(ufunc, method)(*inputs, **kwargs)
|
400 |
+
else:
|
401 |
+
# ufunc(dataframe)
|
402 |
+
if method == "__call__" and not kwargs:
|
403 |
+
# for np.<ufunc>(..) calls
|
404 |
+
# kwargs cannot necessarily be handled block-by-block, so only
|
405 |
+
# take this path if there are no kwargs
|
406 |
+
mgr = inputs[0]._mgr
|
407 |
+
result = mgr.apply(getattr(ufunc, method))
|
408 |
+
else:
|
409 |
+
# otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
|
410 |
+
# Those can have an axis keyword and thus can't be called block-by-block
|
411 |
+
result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs)
|
412 |
+
# e.g. np.negative (only one reached), with "where" and "out" in kwargs
|
413 |
+
|
414 |
+
result = reconstruct(result)
|
415 |
+
return result
|
416 |
+
|
417 |
+
|
418 |
+
def _standardize_out_kwarg(**kwargs) -> dict:
|
419 |
+
"""
|
420 |
+
If kwargs contain "out1" and "out2", replace that with a tuple "out"
|
421 |
+
|
422 |
+
np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or
|
423 |
+
`out1=out1, out2=out2)`
|
424 |
+
"""
|
425 |
+
if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs:
|
426 |
+
out1 = kwargs.pop("out1")
|
427 |
+
out2 = kwargs.pop("out2")
|
428 |
+
out = (out1, out2)
|
429 |
+
kwargs["out"] = out
|
430 |
+
return kwargs
|
431 |
+
|
432 |
+
|
433 |
+
def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
|
434 |
+
"""
|
435 |
+
If we have an `out` keyword, then call the ufunc without `out` and then
|
436 |
+
set the result into the given `out`.
|
437 |
+
"""
|
438 |
+
|
439 |
+
# Note: we assume _standardize_out_kwarg has already been called.
|
440 |
+
out = kwargs.pop("out")
|
441 |
+
where = kwargs.pop("where", None)
|
442 |
+
|
443 |
+
result = getattr(ufunc, method)(*inputs, **kwargs)
|
444 |
+
|
445 |
+
if result is NotImplemented:
|
446 |
+
return NotImplemented
|
447 |
+
|
448 |
+
if isinstance(result, tuple):
|
449 |
+
# i.e. np.divmod, np.modf, np.frexp
|
450 |
+
if not isinstance(out, tuple) or len(out) != len(result):
|
451 |
+
raise NotImplementedError
|
452 |
+
|
453 |
+
for arr, res in zip(out, result):
|
454 |
+
_assign_where(arr, res, where)
|
455 |
+
|
456 |
+
return out
|
457 |
+
|
458 |
+
if isinstance(out, tuple):
|
459 |
+
if len(out) == 1:
|
460 |
+
out = out[0]
|
461 |
+
else:
|
462 |
+
raise NotImplementedError
|
463 |
+
|
464 |
+
_assign_where(out, result, where)
|
465 |
+
return out
|
466 |
+
|
467 |
+
|
468 |
+
def _assign_where(out, result, where) -> None:
|
469 |
+
"""
|
470 |
+
Set a ufunc result into 'out', masking with a 'where' argument if necessary.
|
471 |
+
"""
|
472 |
+
if where is None:
|
473 |
+
# no 'where' arg passed to ufunc
|
474 |
+
out[:] = result
|
475 |
+
else:
|
476 |
+
np.putmask(out, where, result)
|
477 |
+
|
478 |
+
|
479 |
+
def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
|
480 |
+
"""
|
481 |
+
Fallback to the behavior we would get if we did not define __array_ufunc__.
|
482 |
+
|
483 |
+
Notes
|
484 |
+
-----
|
485 |
+
We are assuming that `self` is among `inputs`.
|
486 |
+
"""
|
487 |
+
if not any(x is self for x in inputs):
|
488 |
+
raise NotImplementedError
|
489 |
+
|
490 |
+
new_inputs = [x if x is not self else np.asarray(x) for x in inputs]
|
491 |
+
|
492 |
+
return getattr(ufunc, method)(*new_inputs, **kwargs)
|
493 |
+
|
494 |
+
|
495 |
+
def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
|
496 |
+
"""
|
497 |
+
Dispatch ufunc reductions to self's reduction methods.
|
498 |
+
"""
|
499 |
+
assert method == "reduce"
|
500 |
+
|
501 |
+
if len(inputs) != 1 or inputs[0] is not self:
|
502 |
+
return NotImplemented
|
503 |
+
|
504 |
+
if ufunc.__name__ not in REDUCTION_ALIASES:
|
505 |
+
return NotImplemented
|
506 |
+
|
507 |
+
method_name = REDUCTION_ALIASES[ufunc.__name__]
|
508 |
+
|
509 |
+
# NB: we are assuming that min/max represent minimum/maximum methods,
|
510 |
+
# which would not be accurate for e.g. Timestamp.min
|
511 |
+
if not hasattr(self, method_name):
|
512 |
+
return NotImplemented
|
513 |
+
|
514 |
+
if self.ndim > 1:
|
515 |
+
if isinstance(self, ABCNDFrame):
|
516 |
+
# TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA
|
517 |
+
kwargs["numeric_only"] = False
|
518 |
+
|
519 |
+
if "axis" not in kwargs:
|
520 |
+
# For DataFrame reductions we don't want the default axis=0
|
521 |
+
# Note: np.min is not a ufunc, but uses array_function_dispatch,
|
522 |
+
# so calls DataFrame.min (without ever getting here) with the np.min
|
523 |
+
# default of axis=None, which DataFrame.min catches and changes to axis=0.
|
524 |
+
# np.minimum.reduce(df) gets here bc axis is not in kwargs,
|
525 |
+
# so we set axis=0 to match the behaviorof np.minimum.reduce(df.values)
|
526 |
+
kwargs["axis"] = 0
|
527 |
+
|
528 |
+
# By default, numpy's reductions do not skip NaNs, so we have to
|
529 |
+
# pass skipna=False
|
530 |
+
return getattr(self, method_name)(skipna=False, **kwargs)
|
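For context, a minimal illustrative sketch (not part of the diff) of the behavior the array_ufunc helpers above implement: binary ufuncs align NDFrame inputs on the union of their indexes before computing, and reduction ufuncs are rerouted to pandas' own reduction methods with skipna=False.

import numpy as np
import pandas as pd

s1 = pd.Series([1.0, 2.0], index=["a", "b"])
s2 = pd.Series([10.0, 20.0], index=["b", "c"])

# len(alignable) > 1: both Series are reindexed to the union index
# {a, b, c}, so unmatched labels produce NaN.
print(np.add(s1, s2))

# method == "reduce": np.maximum.reduce dispatches to Series.max with
# skipna=False (see dispatch_reduction_ufunc above).
print(np.maximum.reduce(pd.Series([1, 5, 3])))  # 5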
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__init__.py
ADDED
@@ -0,0 +1,43 @@
from pandas.core.arrays.arrow import ArrowExtensionArray
from pandas.core.arrays.base import (
    ExtensionArray,
    ExtensionOpsMixin,
    ExtensionScalarOpsMixin,
)
from pandas.core.arrays.boolean import BooleanArray
from pandas.core.arrays.categorical import Categorical
from pandas.core.arrays.datetimes import DatetimeArray
from pandas.core.arrays.floating import FloatingArray
from pandas.core.arrays.integer import IntegerArray
from pandas.core.arrays.interval import IntervalArray
from pandas.core.arrays.masked import BaseMaskedArray
from pandas.core.arrays.numpy_ import NumpyExtensionArray
from pandas.core.arrays.period import (
    PeriodArray,
    period_array,
)
from pandas.core.arrays.sparse import SparseArray
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
from pandas.core.arrays.timedeltas import TimedeltaArray

__all__ = [
    "ArrowExtensionArray",
    "ExtensionArray",
    "ExtensionOpsMixin",
    "ExtensionScalarOpsMixin",
    "ArrowStringArray",
    "BaseMaskedArray",
    "BooleanArray",
    "Categorical",
    "DatetimeArray",
    "FloatingArray",
    "IntegerArray",
    "IntervalArray",
    "NumpyExtensionArray",
    "PeriodArray",
    "period_array",
    "SparseArray",
    "StringArray",
    "TimedeltaArray",
]
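Illustrative only (not part of the diff): the concrete classes re-exported above are what pd.array() returns for the corresponding dtypes; the exact string-array class depends on the configured storage backend.

import pandas as pd

print(type(pd.array([1, 2, None], dtype="Int64")))       # IntegerArray
print(type(pd.array(["a", "b"], dtype="string")))        # StringArray (ArrowStringArray with pyarrow storage)
print(type(pd.array(pd.date_range("2024", periods=2))))  # DatetimeArray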
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc
ADDED
Binary file (6.98 kB).
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/_arrow_string_mixins.py
ADDED
@@ -0,0 +1,84 @@
from __future__ import annotations

from typing import Literal

import numpy as np

from pandas.compat import pa_version_under10p1

if not pa_version_under10p1:
    import pyarrow as pa
    import pyarrow.compute as pc


class ArrowStringArrayMixin:
    _pa_array = None

    def __init__(self, *args, **kwargs) -> None:
        raise NotImplementedError

    def _str_pad(
        self,
        width: int,
        side: Literal["left", "right", "both"] = "left",
        fillchar: str = " ",
    ):
        if side == "left":
            pa_pad = pc.utf8_lpad
        elif side == "right":
            pa_pad = pc.utf8_rpad
        elif side == "both":
            pa_pad = pc.utf8_center
        else:
            raise ValueError(
                f"Invalid side: {side}. Side must be one of 'left', 'right', 'both'"
            )
        return type(self)(pa_pad(self._pa_array, width=width, padding=fillchar))

    def _str_get(self, i: int):
        lengths = pc.utf8_length(self._pa_array)
        if i >= 0:
            out_of_bounds = pc.greater_equal(i, lengths)
            start = i
            stop = i + 1
            step = 1
        else:
            out_of_bounds = pc.greater(-i, lengths)
            start = i
            stop = i - 1
            step = -1
        not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True))
        selected = pc.utf8_slice_codeunits(
            self._pa_array, start=start, stop=stop, step=step
        )
        null_value = pa.scalar(
            None, type=self._pa_array.type  # type: ignore[attr-defined]
        )
        result = pc.if_else(not_out_of_bounds, selected, null_value)
        return type(self)(result)

    def _str_slice_replace(
        self, start: int | None = None, stop: int | None = None, repl: str | None = None
    ):
        if repl is None:
            repl = ""
        if start is None:
            start = 0
        if stop is None:
            stop = np.iinfo(np.int64).max
        return type(self)(pc.utf8_replace_slice(self._pa_array, start, stop, repl))

    def _str_capitalize(self):
        return type(self)(pc.utf8_capitalize(self._pa_array))

    def _str_title(self):
        return type(self)(pc.utf8_title(self._pa_array))

    def _str_swapcase(self):
        return type(self)(pc.utf8_swapcase(self._pa_array))

    def _str_removesuffix(self, suffix: str):
        ends_with = pc.ends_with(self._pa_array, pattern=suffix)
        removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix))
        result = pc.if_else(ends_with, removed, self._pa_array)
        return type(self)(result)
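Illustrative only (not part of the diff): the pyarrow.compute kernels the mixin wraps can be called directly, assuming pyarrow >= 10.0.1 per the version guard above.

import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array(["a", "bb", None])
print(pc.utf8_lpad(arr, width=3, padding="*"))  # ["**a", "*bb", null] -- what _str_pad(side="left") wraps
print(pc.utf8_capitalize(arr))                  # ["A", "Bb", null]   -- what _str_capitalize wraps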
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/_mixins.py
ADDED
@@ -0,0 +1,547 @@
from __future__ import annotations

from functools import wraps
from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    cast,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import is_supported_dtype
from pandas._typing import (
    ArrayLike,
    AxisInt,
    Dtype,
    F,
    FillnaOptions,
    PositionalIndexer2D,
    PositionalIndexerTuple,
    ScalarIndexer,
    Self,
    SequenceIndexer,
    Shape,
    TakeIndexer,
    npt,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas.util._validators import (
    validate_bool_kwarg,
    validate_fillna_kwargs,
    validate_insert_loc,
)

from pandas.core.dtypes.common import pandas_dtype
from pandas.core.dtypes.dtypes import (
    DatetimeTZDtype,
    ExtensionDtype,
    PeriodDtype,
)
from pandas.core.dtypes.missing import array_equivalent

from pandas.core import missing
from pandas.core.algorithms import (
    take,
    unique,
    value_counts_internal as value_counts,
)
from pandas.core.array_algos.quantile import quantile_with_mask
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.sorting import nargminmax

if TYPE_CHECKING:
    from collections.abc import Sequence

    from pandas._typing import (
        NumpySorter,
        NumpyValueArrayLike,
    )

    from pandas import Series


def ravel_compat(meth: F) -> F:
    """
    Decorator to ravel a 2D array before passing it to a cython operation,
    then reshape the result to our own shape.
    """

    @wraps(meth)
    def method(self, *args, **kwargs):
        if self.ndim == 1:
            return meth(self, *args, **kwargs)

        flags = self._ndarray.flags
        flat = self.ravel("K")
        result = meth(flat, *args, **kwargs)
        order = "F" if flags.f_contiguous else "C"
        return result.reshape(self.shape, order=order)

    return cast(F, method)


class NDArrayBackedExtensionArray(NDArrayBacked, ExtensionArray):
    """
    ExtensionArray that is backed by a single NumPy ndarray.
    """

    _ndarray: np.ndarray

    # scalar used to denote NA value inside our self._ndarray, e.g. -1
    #  for Categorical, iNaT for Period. Outside of object dtype,
    #  self.isna() should be exactly locations in self._ndarray with
    #  _internal_fill_value.
    _internal_fill_value: Any

    def _box_func(self, x):
        """
        Wrap numpy type in our dtype.type if necessary.
        """
        return x

    def _validate_scalar(self, value):
        # used by NDArrayBackedExtensionIndex.insert
        raise AbstractMethodError(self)

    # ------------------------------------------------------------------------

    def view(self, dtype: Dtype | None = None) -> ArrayLike:
        # We handle datetime64, datetime64tz, timedelta64, and period
        #  dtypes here. Everything else we pass through to the underlying
        #  ndarray.
        if dtype is None or dtype is self.dtype:
            return self._from_backing_data(self._ndarray)

        if isinstance(dtype, type):
            # we sometimes pass non-dtype objects, e.g np.ndarray;
            #  pass those through to the underlying ndarray
            return self._ndarray.view(dtype)

        dtype = pandas_dtype(dtype)
        arr = self._ndarray

        if isinstance(dtype, PeriodDtype):
            cls = dtype.construct_array_type()
            return cls(arr.view("i8"), dtype=dtype)
        elif isinstance(dtype, DatetimeTZDtype):
            dt_cls = dtype.construct_array_type()
            dt64_values = arr.view(f"M8[{dtype.unit}]")
            return dt_cls._simple_new(dt64_values, dtype=dtype)
        elif lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
            from pandas.core.arrays import DatetimeArray

            dt64_values = arr.view(dtype)
            return DatetimeArray._simple_new(dt64_values, dtype=dtype)

        elif lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
            from pandas.core.arrays import TimedeltaArray

            td64_values = arr.view(dtype)
            return TimedeltaArray._simple_new(td64_values, dtype=dtype)

        # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible
        # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None,
        # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,
        # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
        return arr.view(dtype=dtype)  # type: ignore[arg-type]

    def take(
        self,
        indices: TakeIndexer,
        *,
        allow_fill: bool = False,
        fill_value: Any = None,
        axis: AxisInt = 0,
    ) -> Self:
        if allow_fill:
            fill_value = self._validate_scalar(fill_value)

        new_data = take(
            self._ndarray,
            indices,
            allow_fill=allow_fill,
            fill_value=fill_value,
            axis=axis,
        )
        return self._from_backing_data(new_data)

    # ------------------------------------------------------------------------

    def equals(self, other) -> bool:
        if type(self) is not type(other):
            return False
        if self.dtype != other.dtype:
            return False
        return bool(array_equivalent(self._ndarray, other._ndarray, dtype_equal=True))

    @classmethod
    def _from_factorized(cls, values, original):
        assert values.dtype == original._ndarray.dtype
        return original._from_backing_data(values)

    def _values_for_argsort(self) -> np.ndarray:
        return self._ndarray

    def _values_for_factorize(self):
        return self._ndarray, self._internal_fill_value

    def _hash_pandas_object(
        self, *, encoding: str, hash_key: str, categorize: bool
    ) -> npt.NDArray[np.uint64]:
        from pandas.core.util.hashing import hash_array

        values = self._ndarray
        return hash_array(
            values, encoding=encoding, hash_key=hash_key, categorize=categorize
        )

    # Signature of "argmin" incompatible with supertype "ExtensionArray"
    def argmin(self, axis: AxisInt = 0, skipna: bool = True):  # type: ignore[override]
        # override base class by adding axis keyword
        validate_bool_kwarg(skipna, "skipna")
        if not skipna and self._hasna:
            raise NotImplementedError
        return nargminmax(self, "argmin", axis=axis)

    # Signature of "argmax" incompatible with supertype "ExtensionArray"
    def argmax(self, axis: AxisInt = 0, skipna: bool = True):  # type: ignore[override]
        # override base class by adding axis keyword
        validate_bool_kwarg(skipna, "skipna")
        if not skipna and self._hasna:
            raise NotImplementedError
        return nargminmax(self, "argmax", axis=axis)

    def unique(self) -> Self:
        new_data = unique(self._ndarray)
        return self._from_backing_data(new_data)

    @classmethod
    @doc(ExtensionArray._concat_same_type)
    def _concat_same_type(
        cls,
        to_concat: Sequence[Self],
        axis: AxisInt = 0,
    ) -> Self:
        if not lib.dtypes_all_equal([x.dtype for x in to_concat]):
            dtypes = {str(x.dtype) for x in to_concat}
            raise ValueError("to_concat must have the same dtype", dtypes)

        return super()._concat_same_type(to_concat, axis=axis)

    @doc(ExtensionArray.searchsorted)
    def searchsorted(
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter | None = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        npvalue = self._validate_setitem_value(value)
        return self._ndarray.searchsorted(npvalue, side=side, sorter=sorter)

    @doc(ExtensionArray.shift)
    def shift(self, periods: int = 1, fill_value=None):
        # NB: shift is always along axis=0
        axis = 0
        fill_value = self._validate_scalar(fill_value)
        new_values = shift(self._ndarray, periods, axis, fill_value)

        return self._from_backing_data(new_values)

    def __setitem__(self, key, value) -> None:
        key = check_array_indexer(self, key)
        value = self._validate_setitem_value(value)
        self._ndarray[key] = value

    def _validate_setitem_value(self, value):
        return value

    @overload
    def __getitem__(self, key: ScalarIndexer) -> Any:
        ...

    @overload
    def __getitem__(
        self,
        key: SequenceIndexer | PositionalIndexerTuple,
    ) -> Self:
        ...

    def __getitem__(
        self,
        key: PositionalIndexer2D,
    ) -> Self | Any:
        if lib.is_integer(key):
            # fast-path
            result = self._ndarray[key]
            if self.ndim == 1:
                return self._box_func(result)
            return self._from_backing_data(result)

        # error: Incompatible types in assignment (expression has type "ExtensionArray",
        # variable has type "Union[int, slice, ndarray]")
        key = extract_array(key, extract_numpy=True)  # type: ignore[assignment]
        key = check_array_indexer(self, key)
        result = self._ndarray[key]
        if lib.is_scalar(result):
            return self._box_func(result)

        result = self._from_backing_data(result)
        return result

    def _fill_mask_inplace(
        self, method: str, limit: int | None, mask: npt.NDArray[np.bool_]
    ) -> None:
        # (for now) when self.ndim == 2, we assume axis=0
        func = missing.get_fill_func(method, ndim=self.ndim)
        func(self._ndarray.T, limit=limit, mask=mask.T)

    def _pad_or_backfill(
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        mask = self.isna()
        if mask.any():
            # (for now) when self.ndim == 2, we assume axis=0
            func = missing.get_fill_func(method, ndim=self.ndim)

            npvalues = self._ndarray.T
            if copy:
                npvalues = npvalues.copy()
            func(npvalues, limit=limit, limit_area=limit_area, mask=mask.T)
            npvalues = npvalues.T

            if copy:
                new_values = self._from_backing_data(npvalues)
            else:
                new_values = self

        else:
            if copy:
                new_values = self.copy()
            else:
                new_values = self
        return new_values

    @doc(ExtensionArray.fillna)
    def fillna(
        self, value=None, method=None, limit: int | None = None, copy: bool = True
    ) -> Self:
        value, method = validate_fillna_kwargs(
            value, method, validate_scalar_dict_value=False
        )

        mask = self.isna()
        # error: Argument 2 to "check_value_size" has incompatible type
        # "ExtensionArray"; expected "ndarray"
        value = missing.check_value_size(
            value, mask, len(self)  # type: ignore[arg-type]
        )

        if mask.any():
            if method is not None:
                # (for now) when self.ndim == 2, we assume axis=0
                func = missing.get_fill_func(method, ndim=self.ndim)
                npvalues = self._ndarray.T
                if copy:
                    npvalues = npvalues.copy()
                func(npvalues, limit=limit, mask=mask.T)
                npvalues = npvalues.T

                # TODO: NumpyExtensionArray didn't used to copy, need tests
                #  for this
                new_values = self._from_backing_data(npvalues)
            else:
                # fill with value
                if copy:
                    new_values = self.copy()
                else:
                    new_values = self[:]
                new_values[mask] = value
        else:
            # We validate the fill_value even if there is nothing to fill
            if value is not None:
                self._validate_setitem_value(value)

            if not copy:
                new_values = self[:]
            else:
                new_values = self.copy()
        return new_values

    # ------------------------------------------------------------------------
    # Reductions

    def _wrap_reduction_result(self, axis: AxisInt | None, result):
        if axis is None or self.ndim == 1:
            return self._box_func(result)
        return self._from_backing_data(result)

    # ------------------------------------------------------------------------
    # __array_function__ methods

    def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
        """
        Analogue to np.putmask(self, mask, value)

        Parameters
        ----------
        mask : np.ndarray[bool]
        value : scalar or listlike

        Raises
        ------
        TypeError
            If value cannot be cast to self.dtype.
        """
        value = self._validate_setitem_value(value)

        np.putmask(self._ndarray, mask, value)

    def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self:
        """
        Analogue to np.where(mask, self, value)

        Parameters
        ----------
        mask : np.ndarray[bool]
        value : scalar or listlike

        Raises
        ------
        TypeError
            If value cannot be cast to self.dtype.
        """
        value = self._validate_setitem_value(value)

        res_values = np.where(mask, self._ndarray, value)
        if res_values.dtype != self._ndarray.dtype:
            raise AssertionError(
                # GH#56410
                "Something has gone wrong, please report a bug at "
                "github.com/pandas-dev/pandas/"
            )
        return self._from_backing_data(res_values)

    # ------------------------------------------------------------------------
    # Index compat methods

    def insert(self, loc: int, item) -> Self:
        """
        Make new ExtensionArray inserting new item at location. Follows
        Python list.append semantics for negative values.

        Parameters
        ----------
        loc : int
        item : object

        Returns
        -------
        type(self)
        """
        loc = validate_insert_loc(loc, len(self))

        code = self._validate_scalar(item)

        new_vals = np.concatenate(
            (
                self._ndarray[:loc],
                np.asarray([code], dtype=self._ndarray.dtype),
                self._ndarray[loc:],
            )
        )
        return self._from_backing_data(new_vals)

    # ------------------------------------------------------------------------
    # Additional array methods
    #  These are not part of the EA API, but we implement them because
    #  pandas assumes they're there.

    def value_counts(self, dropna: bool = True) -> Series:
        """
        Return a Series containing counts of unique values.

        Parameters
        ----------
        dropna : bool, default True
            Don't include counts of NA values.

        Returns
        -------
        Series
        """
        if self.ndim != 1:
            raise NotImplementedError

        from pandas import (
            Index,
            Series,
        )

        if dropna:
            # error: Unsupported operand type for ~ ("ExtensionArray")
            values = self[~self.isna()]._ndarray  # type: ignore[operator]
        else:
            values = self._ndarray

        result = value_counts(values, sort=False, dropna=dropna)

        index_arr = self._from_backing_data(np.asarray(result.index._data))
        index = Index(index_arr, name=result.index.name)
        return Series(result._values, index=index, name=result.name, copy=False)

    def _quantile(
        self,
        qs: npt.NDArray[np.float64],
        interpolation: str,
    ) -> Self:
        # TODO: disable for Categorical if not ordered?

        mask = np.asarray(self.isna())
        arr = self._ndarray
        fill_value = self._internal_fill_value

        res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)

        res_values = self._cast_quantile_result(res_values)
        return self._from_backing_data(res_values)

    # TODO: see if we can share this with other dispatch-wrapping methods
    def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray:
        """
        Cast the result of quantile_with_mask to an appropriate dtype
        to pass to _from_backing_data in _quantile.
        """
        return res_values

    # ------------------------------------------------------------------------
    # numpy-like methods

    @classmethod
    def _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self:
        """
        Analogous to np.empty(shape, dtype=dtype)

        Parameters
        ----------
        shape : tuple[int]
        dtype : ExtensionDtype
        """
        # The base implementation uses a naive approach to find the dtype
        #  for the backing ndarray
        arr = cls._from_sequence([], dtype=dtype)
        backing = np.empty(shape, dtype=arr._ndarray.dtype)
        return arr._from_backing_data(backing)
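Illustrative only (not part of the diff): Categorical is one NDArrayBacked subclass, so its take() goes through the implementation above, with allow_fill routing the fill value through _validate_scalar into the backing codes.

import pandas as pd

cat = pd.Categorical(["a", "b", "a"])
print(cat.take([0, -1, 2], allow_fill=True))  # ['a', NaN, 'a']
print(cat.unique())                           # ['a', 'b'], via unique() above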
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/_ranges.py
ADDED
@@ -0,0 +1,207 @@
"""
Helper functions to generate range-like data for DatetimeArray
(and possibly TimedeltaArray/PeriodArray)
"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numpy as np

from pandas._libs.lib import i8max
from pandas._libs.tslibs import (
    BaseOffset,
    OutOfBoundsDatetime,
    Timedelta,
    Timestamp,
    iNaT,
)

if TYPE_CHECKING:
    from pandas._typing import npt


def generate_regular_range(
    start: Timestamp | Timedelta | None,
    end: Timestamp | Timedelta | None,
    periods: int | None,
    freq: BaseOffset,
    unit: str = "ns",
) -> npt.NDArray[np.intp]:
    """
    Generate a range of dates or timestamps with the spans between dates
    described by the given `freq` DateOffset.

    Parameters
    ----------
    start : Timedelta, Timestamp or None
        First point of produced date range.
    end : Timedelta, Timestamp or None
        Last point of produced date range.
    periods : int or None
        Number of periods in produced date range.
    freq : Tick
        Describes space between dates in produced date range.
    unit : str, default "ns"
        The resolution the output is meant to represent.

    Returns
    -------
    ndarray[np.int64]
        Representing the given resolution.
    """
    istart = start._value if start is not None else None
    iend = end._value if end is not None else None
    freq.nanos  # raises if non-fixed frequency
    td = Timedelta(freq)
    b: int
    e: int
    try:
        td = td.as_unit(unit, round_ok=False)
    except ValueError as err:
        raise ValueError(
            f"freq={freq} is incompatible with unit={unit}. "
            "Use a lower freq or a higher unit instead."
        ) from err
    stride = int(td._value)

    if periods is None and istart is not None and iend is not None:
        b = istart
        # cannot just use e = Timestamp(end) + 1 because arange breaks when
        #  stride is too large, see GH10887
        e = b + (iend - b) // stride * stride + stride // 2 + 1
    elif istart is not None and periods is not None:
        b = istart
        e = _generate_range_overflow_safe(b, periods, stride, side="start")
    elif iend is not None and periods is not None:
        e = iend + stride
        b = _generate_range_overflow_safe(e, periods, stride, side="end")
    else:
        raise ValueError(
            "at least 'start' or 'end' should be specified if a 'period' is given."
        )

    with np.errstate(over="raise"):
        # If the range is sufficiently large, np.arange may overflow
        #  and incorrectly return an empty array if not caught.
        try:
            values = np.arange(b, e, stride, dtype=np.int64)
        except FloatingPointError:
            xdr = [b]
            while xdr[-1] != e:
                xdr.append(xdr[-1] + stride)
            values = np.array(xdr[:-1], dtype=np.int64)
    return values


def _generate_range_overflow_safe(
    endpoint: int, periods: int, stride: int, side: str = "start"
) -> int:
    """
    Calculate the second endpoint for passing to np.arange, checking
    to avoid an integer overflow. Catch OverflowError and re-raise
    as OutOfBoundsDatetime.

    Parameters
    ----------
    endpoint : int
        nanosecond timestamp of the known endpoint of the desired range
    periods : int
        number of periods in the desired range
    stride : int
        nanoseconds between periods in the desired range
    side : {'start', 'end'}
        which end of the range `endpoint` refers to

    Returns
    -------
    other_end : int

    Raises
    ------
    OutOfBoundsDatetime
    """
    # GH#14187 raise instead of incorrectly wrapping around
    assert side in ["start", "end"]

    i64max = np.uint64(i8max)
    msg = f"Cannot generate range with {side}={endpoint} and periods={periods}"

    with np.errstate(over="raise"):
        # if periods * strides cannot be multiplied within the *uint64* bounds,
        #  we cannot salvage the operation by recursing, so raise
        try:
            addend = np.uint64(periods) * np.uint64(np.abs(stride))
        except FloatingPointError as err:
            raise OutOfBoundsDatetime(msg) from err

    if np.abs(addend) <= i64max:
        # relatively easy case without casting concerns
        return _generate_range_overflow_safe_signed(endpoint, periods, stride, side)

    elif (endpoint > 0 and side == "start" and stride > 0) or (
        endpoint < 0 < stride and side == "end"
    ):
        # no chance of not-overflowing
        raise OutOfBoundsDatetime(msg)

    elif side == "end" and endpoint - stride <= i64max < endpoint:
        # in _generate_regular_range we added `stride` thereby overflowing
        #  the bounds. Adjust to fix this.
        return _generate_range_overflow_safe(
            endpoint - stride, periods - 1, stride, side
        )

    # split into smaller pieces
    mid_periods = periods // 2
    remaining = periods - mid_periods
    assert 0 < remaining < periods, (remaining, periods, endpoint, stride)

    midpoint = int(_generate_range_overflow_safe(endpoint, mid_periods, stride, side))
    return _generate_range_overflow_safe(midpoint, remaining, stride, side)


def _generate_range_overflow_safe_signed(
    endpoint: int, periods: int, stride: int, side: str
) -> int:
    """
    A special case for _generate_range_overflow_safe where `periods * stride`
    can be calculated without overflowing int64 bounds.
    """
    assert side in ["start", "end"]
    if side == "end":
        stride *= -1

    with np.errstate(over="raise"):
        addend = np.int64(periods) * np.int64(stride)
        try:
            # easy case with no overflows
            result = np.int64(endpoint) + addend
            if result == iNaT:
                # Putting this into a DatetimeArray/TimedeltaArray
                #  would incorrectly be interpreted as NaT
                raise OverflowError
            return int(result)
        except (FloatingPointError, OverflowError):
            # with endpoint negative and addend positive we risk
            #  FloatingPointError; with reversed signed we risk OverflowError
            pass

    # if stride and endpoint had opposite signs, then endpoint + addend
    #  should never overflow. so they must have the same signs
    assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0)

    if stride > 0:
        # watch out for very special case in which we just slightly
        #  exceed implementation bounds, but when passing the result to
        #  np.arange will get a result slightly within the bounds

        uresult = np.uint64(endpoint) + np.uint64(addend)
        i64max = np.uint64(i8max)
        assert uresult > i64max
        if uresult <= i64max + np.uint64(stride):
            return int(uresult)

    raise OutOfBoundsDatetime(
        f"Cannot generate range with {side}={endpoint} and periods={periods}"
    )
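Illustrative only (not part of the diff): pd.date_range with a fixed (Tick) frequency ends up in generate_regular_range, which converts the endpoints to int64 epoch values and fills the range in with np.arange.

import pandas as pd

idx = pd.date_range("2024-01-01", periods=4, freq="6h")
print(idx.asi8)  # int64 values spaced 6 * 3600 * 10**9 nanoseconds apart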
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/_utils.py
ADDED
@@ -0,0 +1,63 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
)

import numpy as np

from pandas._libs import lib
from pandas.errors import LossySetitemError

from pandas.core.dtypes.cast import np_can_hold_element
from pandas.core.dtypes.common import is_numeric_dtype

if TYPE_CHECKING:
    from pandas._typing import (
        ArrayLike,
        npt,
    )


def to_numpy_dtype_inference(
    arr: ArrayLike, dtype: npt.DTypeLike | None, na_value, hasna: bool
) -> tuple[npt.DTypeLike, Any]:
    if dtype is None and is_numeric_dtype(arr.dtype):
        dtype_given = False
        if hasna:
            if arr.dtype.kind == "b":
                dtype = np.dtype(np.object_)
            else:
                if arr.dtype.kind in "iu":
                    dtype = np.dtype(np.float64)
                else:
                    dtype = arr.dtype.numpy_dtype  # type: ignore[union-attr]
                if na_value is lib.no_default:
                    na_value = np.nan
        else:
            dtype = arr.dtype.numpy_dtype  # type: ignore[union-attr]
    elif dtype is not None:
        dtype = np.dtype(dtype)
        dtype_given = True
    else:
        dtype_given = True

    if na_value is lib.no_default:
        if dtype is None or not hasna:
            na_value = arr.dtype.na_value
        elif dtype.kind == "f":  # type: ignore[union-attr]
            na_value = np.nan
        elif dtype.kind == "M":  # type: ignore[union-attr]
            na_value = np.datetime64("nat")
        elif dtype.kind == "m":  # type: ignore[union-attr]
            na_value = np.timedelta64("nat")
        else:
            na_value = arr.dtype.na_value

    if not dtype_given and hasna:
        try:
            np_can_hold_element(dtype, na_value)  # type: ignore[arg-type]
        except LossySetitemError:
            dtype = np.dtype(np.object_)
    return dtype, na_value
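Illustrative only (not part of the diff): the masked arrays' to_numpy relies on the inference above; with NA present and no dtype given, integer data is widened to float64 and the missing value becomes np.nan.

import pandas as pd

arr = pd.array([1, 2, None], dtype="Int64")
print(arr.to_numpy())                # array([ 1.,  2., nan]), per the "iu" branch above
print(arr.to_numpy(dtype="object"))  # array([1, 2, <NA>], dtype=object)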
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/base.py
ADDED
@@ -0,0 +1,2588 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
An interface for extending pandas with custom arrays.

.. warning::

   This is an experimental API and subject to breaking changes
   without warning.
"""
from __future__ import annotations

import operator
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    ClassVar,
    Literal,
    cast,
    overload,
)
import warnings

import numpy as np

from pandas._libs import (
    algos as libalgos,
    lib,
)
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
    Appender,
    Substitution,
    cache_readonly,
)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
    validate_bool_kwarg,
    validate_fillna_kwargs,
    validate_insert_loc,
)

from pandas.core.dtypes.cast import maybe_cast_pointwise_result
from pandas.core.dtypes.common import (
    is_list_like,
    is_scalar,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.missing import isna

from pandas.core import (
    arraylike,
    missing,
    roperator,
)
from pandas.core.algorithms import (
    duplicated,
    factorize_array,
    isin,
    map_array,
    mode,
    rank,
    unique,
)
from pandas.core.array_algos.quantile import quantile_with_mask
from pandas.core.missing import _fill_limit_area_1d
from pandas.core.sorting import (
    nargminmax,
    nargsort,
)

if TYPE_CHECKING:
    from collections.abc import (
        Iterator,
        Sequence,
    )

    from pandas._typing import (
        ArrayLike,
        AstypeArg,
        AxisInt,
        Dtype,
        DtypeObj,
        FillnaOptions,
        InterpolateOptions,
        NumpySorter,
        NumpyValueArrayLike,
        PositionalIndexer,
        ScalarIndexer,
        Self,
        SequenceIndexer,
        Shape,
        SortKind,
        TakeIndexer,
        npt,
    )

    from pandas import Index

_extension_array_shared_docs: dict[str, str] = {}


class ExtensionArray:
    """
    Abstract base class for custom 1-D array types.

    pandas will recognize instances of this class as proper arrays
    with a custom type and will not attempt to coerce them to objects. They
    may be stored directly inside a :class:`DataFrame` or :class:`Series`.

    Attributes
    ----------
    dtype
    nbytes
    ndim
    shape

    Methods
    -------
    argsort
    astype
    copy
    dropna
    duplicated
    factorize
    fillna
    equals
    insert
    interpolate
    isin
    isna
    ravel
    repeat
    searchsorted
    shift
    take
    tolist
    unique
    view
    _accumulate
    _concat_same_type
    _explode
    _formatter
    _from_factorized
    _from_sequence
    _from_sequence_of_strings
    _hash_pandas_object
    _pad_or_backfill
    _reduce
    _values_for_argsort
    _values_for_factorize

    Notes
    -----
    The interface includes the following abstract methods that must be
    implemented by subclasses:

    * _from_sequence
    * _from_factorized
    * __getitem__
    * __len__
    * __eq__
    * dtype
    * nbytes
    * isna
    * take
    * copy
    * _concat_same_type
    * interpolate

    A default repr displaying the type, (truncated) data, length,
    and dtype is provided. It can be customized or replaced by
    overriding:

    * __repr__ : A default repr for the ExtensionArray.
    * _formatter : Print scalars inside a Series or DataFrame.

    Some methods require casting the ExtensionArray to an ndarray of Python
    objects with ``self.astype(object)``, which may be expensive. When
    performance is a concern, we highly recommend overriding the following
    methods:

    * fillna
    * _pad_or_backfill
    * dropna
    * unique
    * factorize / _values_for_factorize
    * argsort, argmax, argmin / _values_for_argsort
    * searchsorted
    * map

    The remaining methods implemented on this class should be performant,
    as they only compose abstract methods. Still, a more efficient
    implementation may be available, and these methods can be overridden.

    One can implement methods to handle array accumulations or reductions.

    * _accumulate
    * _reduce

    One can implement methods to handle parsing from strings that will be used
    in methods such as ``pandas.io.parsers.read_csv``.

    * _from_sequence_of_strings

    This class does not inherit from 'abc.ABCMeta' for performance reasons.
    Methods and properties required by the interface raise
    ``pandas.errors.AbstractMethodError`` and no ``register`` method is
    provided for registering virtual subclasses.

    ExtensionArrays are limited to 1 dimension.

    They may be backed by none, one, or many NumPy arrays. For example,
    ``pandas.Categorical`` is an extension array backed by two arrays,
    one for codes and one for categories. An array of IPv6 addresses may
    be backed by a NumPy structured array with two fields, one for the
    lower 64 bits and one for the upper 64 bits. Or they may be backed
    by some other storage type, like Python lists. Pandas makes no
    assumptions on how the data are stored, just that it can be converted
    to a NumPy array.
    The ExtensionArray interface does not impose any rules on how this data
    is stored. However, currently, the backing data cannot be stored in
    attributes called ``.values`` or ``._values`` to ensure full compatibility
    with pandas internals. But other names such as ``.data``, ``._data``,
    ``._items``, ... can be freely used.

    If implementing NumPy's ``__array_ufunc__`` interface, pandas expects
    that

    1. You defer by returning ``NotImplemented`` when any Series are present
       in `inputs`. Pandas will extract the arrays and call the ufunc again.
    2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class.
       Pandas inspects this to determine whether the ufunc is valid for the
       types present.

    See :ref:`extending.extension.ufunc` for more.

    By default, ExtensionArrays are not hashable. Immutable subclasses may
    override this behavior.

    Examples
    --------
    Please see the following:

    https://github.com/pandas-dev/pandas/blob/main/pandas/tests/extension/list/array.py
    """

    # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
    # Don't override this.
    _typ = "extension"

    # similar to __array_priority__, positions ExtensionArray after Index,
    # Series, and DataFrame. EA subclasses may override to choose which EA
    # subclass takes priority. If overriding, the value should always be
    # strictly less than 2000 to be below Index.__pandas_priority__.
    __pandas_priority__ = 1000

    # ------------------------------------------------------------------------
    # Constructors
    # ------------------------------------------------------------------------

    @classmethod
    def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
        """
        Construct a new ExtensionArray from a sequence of scalars.

        Parameters
        ----------
        scalars : Sequence
            Each element will be an instance of the scalar type for this
            array, ``cls.dtype.type`` or be converted into this type in this method.
        dtype : dtype, optional
            Construct for this particular dtype. This should be a Dtype
            compatible with the ExtensionArray.
        copy : bool, default False
            If True, copy the underlying data.

        Returns
        -------
        ExtensionArray

        Examples
        --------
        >>> pd.arrays.IntegerArray._from_sequence([4, 5])
        <IntegerArray>
        [4, 5]
        Length: 2, dtype: Int64
        """
        raise AbstractMethodError(cls)

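    # A minimal sketch of a concrete ``_from_sequence`` for a hypothetical
    # list-backed subclass (``MyListArray`` is illustrative only, not a
    # pandas class). ``list(scalars)`` already materializes a new list, so
    # the ``copy`` flag needs no extra handling in this storage model:
    #
    #   @classmethod
    #   def _from_sequence(cls, scalars, *, dtype=None, copy=False):
    #       return cls(list(scalars))
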
    @classmethod
    def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self:
        """
        Strict analogue to _from_sequence, allowing only sequences of scalars
        that should be specifically inferred to the given dtype.

        Parameters
        ----------
        scalars : sequence
        dtype : ExtensionDtype

        Raises
        ------
        TypeError or ValueError

        Notes
        -----
        This is called in a try/except block when casting the result of a
        pointwise operation.
        """
        try:
            return cls._from_sequence(scalars, dtype=dtype, copy=False)
        except (ValueError, TypeError):
            raise
        except Exception:
            warnings.warn(
                "_from_scalars should only raise ValueError or TypeError. "
                "Consider overriding _from_scalars where appropriate.",
                stacklevel=find_stack_level(),
            )
            raise

    @classmethod
    def _from_sequence_of_strings(
        cls, strings, *, dtype: Dtype | None = None, copy: bool = False
    ):
        """
        Construct a new ExtensionArray from a sequence of strings.

        Parameters
        ----------
        strings : Sequence
            Each element will be an instance of the scalar type for this
            array, ``cls.dtype.type``.
        dtype : dtype, optional
            Construct for this particular dtype. This should be a Dtype
            compatible with the ExtensionArray.
        copy : bool, default False
            If True, copy the underlying data.

        Returns
        -------
        ExtensionArray

        Examples
        --------
        >>> pd.arrays.IntegerArray._from_sequence_of_strings(["1", "2", "3"])
        <IntegerArray>
        [1, 2, 3]
        Length: 3, dtype: Int64
        """
        raise AbstractMethodError(cls)

    @classmethod
    def _from_factorized(cls, values, original):
        """
        Reconstruct an ExtensionArray after factorization.

        Parameters
        ----------
        values : ndarray
            An integer ndarray with the factorized values.
        original : ExtensionArray
            The original ExtensionArray that factorize was called on.

        See Also
        --------
        factorize : Top-level factorize method that dispatches here.
        ExtensionArray.factorize : Encode the extension array as an enumerated type.

        Examples
        --------
        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
        ...                                       pd.Interval(1, 5), pd.Interval(1, 5)])
        >>> codes, uniques = pd.factorize(interv_arr)
        >>> pd.arrays.IntervalArray._from_factorized(uniques, interv_arr)
        <IntervalArray>
        [(0, 1], (1, 5]]
        Length: 2, dtype: interval[int64, right]
        """
        raise AbstractMethodError(cls)

    # ------------------------------------------------------------------------
    # Must be a Sequence
    # ------------------------------------------------------------------------
    @overload
    def __getitem__(self, item: ScalarIndexer) -> Any:
        ...

    @overload
    def __getitem__(self, item: SequenceIndexer) -> Self:
        ...

    def __getitem__(self, item: PositionalIndexer) -> Self | Any:
        """
        Select a subset of self.

        Parameters
        ----------
        item : int, slice, or ndarray
            * int: The position in 'self' to get.

            * slice: A slice object, where 'start', 'stop', and 'step' are
              integers or None

            * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'

            * list[int]: A list of int

        Returns
        -------
        item : scalar or ExtensionArray

        Notes
        -----
        For scalar ``item``, return a scalar value suitable for the array's
        type. This should be an instance of ``self.dtype.type``.

        For slice ``key``, return an instance of ``ExtensionArray``, even
        if the slice is length 0 or 1.

        For a boolean mask, return an instance of ``ExtensionArray``, filtered
        to the values where ``item`` is True.
        """
        raise AbstractMethodError(self)

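    # A sketch of the three indexer kinds for a subclass backed by a NumPy
    # array ``self._ndarray`` (an assumed attribute name, not part of this
    # base class):
    #
    #   def __getitem__(self, item):
    #       if isinstance(item, (int, np.integer)):
    #           return self._ndarray[item]          # scalar result
    #       # slices, integer lists, and boolean masks all reduce to
    #       # ndarray indexing; re-wrap so the result stays an ExtensionArray
    #       return type(self)(self._ndarray[item])
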
    def __setitem__(self, key, value) -> None:
        """
        Set one or more values inplace.

        This method is not required to satisfy the pandas extension array
        interface.

        Parameters
        ----------
        key : int, ndarray, or slice
            When called from, e.g. ``Series.__setitem__``, ``key`` will be
            one of

            * scalar int
            * ndarray of integers.
            * boolean ndarray
            * slice object

        value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
            value or values to be set at ``key``.

        Returns
        -------
        None
        """
        # Some notes to the ExtensionArray implementer who may have ended up
        # here. While this method is not required for the interface, if you
        # *do* choose to implement __setitem__, then some semantics should be
        # observed:
        #
        # * Setting multiple values : ExtensionArrays should support setting
        #   multiple values at once, 'key' will be a sequence of integers and
        #   'value' will be a same-length sequence.
        #
        # * Broadcasting : For a sequence 'key' and a scalar 'value',
        #   each position in 'key' should be set to 'value'.
        #
        # * Coercion : Most users will expect basic coercion to work. For
        #   example, a string like '2018-01-01' is coerced to a datetime
        #   when setting on a datetime64ns array. In general, if the
        #   __init__ method coerces that value, then so should __setitem__
        # Note, also, that Series/DataFrame.where internally use __setitem__
        # on a copy of the data.
        raise NotImplementedError(f"{type(self)} does not implement __setitem__.")

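    # A sketch of those semantics for the same hypothetical ndarray-backed
    # subclass: NumPy assignment already provides the multi-value and
    # broadcasting behavior described above, and coercion is delegated to
    # the storage dtype (``is_list_like`` is the helper imported at the top
    # of this module; ``self._ndarray`` is an assumed attribute):
    #
    #   def __setitem__(self, key, value):
    #       if is_list_like(value):
    #           value = np.asarray(value, dtype=self._ndarray.dtype)
    #       self._ndarray[key] = value   # broadcasts a scalar over ``key``
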
    def __len__(self) -> int:
        """
        Length of this array.

        Returns
        -------
        length : int
        """
        raise AbstractMethodError(self)

    def __iter__(self) -> Iterator[Any]:
        """
        Iterate over elements of the array.
        """
        # This needs to be implemented so that pandas recognizes extension
        # arrays as list-like. The default implementation makes successive
        # calls to ``__getitem__``, which may be slower than necessary.
        for i in range(len(self)):
            yield self[i]

    def __contains__(self, item: object) -> bool | np.bool_:
        """
        Return for `item in self`.
        """
        # GH37867
        # comparisons of any item to pd.NA always return pd.NA, so e.g. "a" in [pd.NA]
        # would raise a TypeError. The implementation below works around that.
        if is_scalar(item) and isna(item):
            if not self._can_hold_na:
                return False
            elif item is self.dtype.na_value or isinstance(item, self.dtype.type):
                return self._hasna
            else:
                return False
        else:
            # error: Item "ExtensionArray" of "Union[ExtensionArray, ndarray]" has no
            # attribute "any"
            return (item == self).any()  # type: ignore[union-attr]

    # error: Signature of "__eq__" incompatible with supertype "object"
    def __eq__(self, other: object) -> ArrayLike:  # type: ignore[override]
        """
        Return for `self == other` (element-wise equality).
        """
        # Implementer note: this should return a boolean numpy ndarray or
        # a boolean ExtensionArray.
        # When `other` is one of Series, Index, or DataFrame, this method should
        # return NotImplemented (to ensure that those objects are responsible for
        # first unpacking the arrays, and then dispatch the operation to the
        # underlying arrays)
        raise AbstractMethodError(self)

    # error: Signature of "__ne__" incompatible with supertype "object"
    def __ne__(self, other: object) -> ArrayLike:  # type: ignore[override]
        """
        Return for `self != other` (element-wise inequality).
        """
        # error: Unsupported operand type for ~ ("ExtensionArray")
        return ~(self == other)  # type: ignore[operator]

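    # A sketch of the deferral contract stated in the ``__eq__`` comments
    # above: bail out with ``NotImplemented`` for pandas containers so they
    # can unbox their arrays first, then compare on the raw storage
    # (``self._ndarray`` is again an assumed attribute; the ABC classes are
    # imported at the top of this module):
    #
    #   def __eq__(self, other):
    #       if isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)):
    #           return NotImplemented
    #       return self._ndarray == other
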
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        """
        Convert to a NumPy ndarray.

        This is similar to :meth:`numpy.asarray`, but may provide additional control
        over how the conversion is done.

        Parameters
        ----------
        dtype : str or numpy.dtype, optional
            The dtype to pass to :meth:`numpy.asarray`.
        copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary.
        na_value : Any, optional
            The value to use for missing values. The default value depends
            on `dtype` and the type of the array.

        Returns
        -------
        numpy.ndarray
        """
        result = np.asarray(self, dtype=dtype)
        if copy or na_value is not lib.no_default:
            result = result.copy()
        if na_value is not lib.no_default:
            result[self.isna()] = na_value
        return result

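    # A short usage sketch of the ``na_value`` hook above: missing entries
    # are assigned after the ndarray conversion, so any placeholder the
    # target dtype can hold will work:
    #
    #   >>> arr = pd.array([1, 2, pd.NA], dtype="Int64")
    #   >>> arr.to_numpy(dtype="float64", na_value=np.nan)
    #   array([ 1.,  2., nan])
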
    # ------------------------------------------------------------------------
    # Required attributes
    # ------------------------------------------------------------------------

    @property
    def dtype(self) -> ExtensionDtype:
        """
        An instance of ExtensionDtype.

        Examples
        --------
        >>> pd.array([1, 2, 3]).dtype
        Int64Dtype()
        """
        raise AbstractMethodError(self)

    @property
    def shape(self) -> Shape:
        """
        Return a tuple of the array dimensions.

        Examples
        --------
        >>> arr = pd.array([1, 2, 3])
        >>> arr.shape
        (3,)
        """
        return (len(self),)

    @property
    def size(self) -> int:
        """
        The number of elements in the array.
        """
        # error: Incompatible return value type (got "signedinteger[_64Bit]",
        # expected "int")  [return-value]
        return np.prod(self.shape)  # type: ignore[return-value]

    @property
    def ndim(self) -> int:
        """
        Extension Arrays are only allowed to be 1-dimensional.

        Examples
        --------
        >>> arr = pd.array([1, 2, 3])
        >>> arr.ndim
        1
        """
        return 1

    @property
    def nbytes(self) -> int:
        """
        The number of bytes needed to store this object in memory.

        Examples
        --------
        >>> pd.array([1, 2, 3]).nbytes
        27
        """
        # If this is expensive to compute, return an approximate lower bound
        # on the number of bytes needed.
        raise AbstractMethodError(self)

    # ------------------------------------------------------------------------
    # Additional Methods
    # ------------------------------------------------------------------------

    @overload
    def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
        ...

    @overload
    def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
        ...

    @overload
    def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
        ...

    def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
        """
        Cast to a NumPy array or ExtensionArray with 'dtype'.

        Parameters
        ----------
        dtype : str or dtype
            Typecode or data-type to which the array is cast.
        copy : bool, default True
            Whether to copy the data, even if not necessary. If False,
            a copy is made only if the old dtype does not match the
            new dtype.

        Returns
        -------
        np.ndarray or pandas.api.extensions.ExtensionArray
            An ``ExtensionArray`` if ``dtype`` is ``ExtensionDtype``,
            otherwise a Numpy ndarray with ``dtype`` for its dtype.

        Examples
        --------
        >>> arr = pd.array([1, 2, 3])
        >>> arr
        <IntegerArray>
        [1, 2, 3]
        Length: 3, dtype: Int64

        Casting to another ``ExtensionDtype`` returns an ``ExtensionArray``:

        >>> arr1 = arr.astype('Float64')
        >>> arr1
        <FloatingArray>
        [1.0, 2.0, 3.0]
        Length: 3, dtype: Float64
        >>> arr1.dtype
        Float64Dtype()

        Otherwise, we will get a Numpy ndarray:

        >>> arr2 = arr.astype('float64')
        >>> arr2
        array([1., 2., 3.])
        >>> arr2.dtype
        dtype('float64')
        """
        dtype = pandas_dtype(dtype)
        if dtype == self.dtype:
            if not copy:
                return self
            else:
                return self.copy()

        if isinstance(dtype, ExtensionDtype):
            cls = dtype.construct_array_type()
            return cls._from_sequence(self, dtype=dtype, copy=copy)

        elif lib.is_np_dtype(dtype, "M"):
            from pandas.core.arrays import DatetimeArray

            return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy)

        elif lib.is_np_dtype(dtype, "m"):
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy)

        if not copy:
            return np.asarray(self, dtype=dtype)
        else:
            return np.array(self, dtype=dtype, copy=copy)

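    # A compact summary of the dispatch above (assuming pandas 2.x
    # behavior): matching dtype -> self or a copy; ExtensionDtype -> the
    # target array type's own ``_from_sequence``; datetime64/timedelta64 ->
    # the DatetimeArray/TimedeltaArray fast paths; anything else -> a plain
    # ndarray conversion. For example:
    #
    #   >>> pd.array([1, 2, 3]).astype("Int32").dtype
    #   Int32Dtype()
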
    def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:
        """
        A 1-D array indicating if each value is missing.

        Returns
        -------
        numpy.ndarray or pandas.api.extensions.ExtensionArray
            In most cases, this should return a NumPy ndarray. For
            exceptional cases like ``SparseArray``, where returning
            an ndarray would be expensive, an ExtensionArray may be
            returned.

        Notes
        -----
        If returning an ExtensionArray, then

        * ``na_values._is_boolean`` should be True
        * `na_values` should implement :func:`ExtensionArray._reduce`
        * ``na_values.any`` and ``na_values.all`` should be implemented

        Examples
        --------
        >>> arr = pd.array([1, 2, np.nan, np.nan])
        >>> arr.isna()
        array([False, False,  True,  True])
        """
        raise AbstractMethodError(self)

    @property
    def _hasna(self) -> bool:
        # GH#22680
        """
        Equivalent to `self.isna().any()`.

        Some ExtensionArray subclasses may be able to optimize this check.
        """
        return bool(self.isna().any())

    def _values_for_argsort(self) -> np.ndarray:
        """
        Return values for sorting.

        Returns
        -------
        ndarray
            The transformed values should maintain the ordering between values
            within the array.

        See Also
        --------
        ExtensionArray.argsort : Return the indices that would sort this array.

        Notes
        -----
        The caller is responsible for *not* modifying these values in-place, so
        it is safe for implementers to give views on ``self``.

        Functions that use this (e.g. ``ExtensionArray.argsort``) should ignore
        entries with missing values in the original array (according to
        ``self.isna()``). This means that the corresponding entries in the returned
        array don't need to be modified to sort correctly.

        Examples
        --------
        In most cases, this is the underlying Numpy array of the ``ExtensionArray``:

        >>> arr = pd.array([1, 2, 3])
        >>> arr._values_for_argsort()
        array([1, 2, 3])
        """
        # Note: this is used in `ExtensionArray.argsort/argmin/argmax`.
        return np.array(self)

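    # A sketch of a cheaper override: an array whose values are stored as
    # integer codes with a known order (``self._codes`` is hypothetical)
    # can return those codes directly instead of boxing via
    # ``np.array(self)``:
    #
    #   def _values_for_argsort(self):
    #       # the codes already encode the ordering; missing entries may
    #       # hold any value, because the callers consult ``self.isna()``
    #       # separately, per the Notes section above
    #       return self._codes
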
    def argsort(
        self,
        *,
        ascending: bool = True,
        kind: SortKind = "quicksort",
        na_position: str = "last",
        **kwargs,
    ) -> np.ndarray:
        """
        Return the indices that would sort this array.

        Parameters
        ----------
        ascending : bool, default True
            Whether the indices should result in an ascending
            or descending sort.
        kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
            Sorting algorithm.
        na_position : {'first', 'last'}, default 'last'
            If ``'first'``, put ``NaN`` values at the beginning.
            If ``'last'``, put ``NaN`` values at the end.
        **kwargs
            Passed through to :func:`numpy.argsort`.

        Returns
        -------
        np.ndarray[np.intp]
            Array of indices that sort ``self``. If NaN values are contained,
            NaN values are placed at the end.

        See Also
        --------
        numpy.argsort : Sorting implementation used internally.

        Examples
        --------
        >>> arr = pd.array([3, 1, 2, 5, 4])
        >>> arr.argsort()
        array([1, 2, 0, 4, 3])
        """
        # Implementer note: You have two places to override the behavior of
        # argsort.
        # 1. _values_for_argsort : construct the values passed to np.argsort
        # 2. argsort : total control over sorting. In case of overriding this,
        #    it is recommended to also override argmax/argmin
        ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)

        values = self._values_for_argsort()
        return nargsort(
            values,
            kind=kind,
            ascending=ascending,
            na_position=na_position,
            mask=np.asarray(self.isna()),
        )

    def argmin(self, skipna: bool = True) -> int:
        """
        Return the index of minimum value.

        In case of multiple occurrences of the minimum value, the index
        corresponding to the first occurrence is returned.

        Parameters
        ----------
        skipna : bool, default True

        Returns
        -------
        int

        See Also
        --------
        ExtensionArray.argmax : Return the index of the maximum value.

        Examples
        --------
        >>> arr = pd.array([3, 1, 2, 5, 4])
        >>> arr.argmin()
        1
        """
        # Implementer note: You have two places to override the behavior of
        # argmin.
        # 1. _values_for_argsort : construct the values used in nargminmax
        # 2. argmin itself : total control over sorting.
        validate_bool_kwarg(skipna, "skipna")
        if not skipna and self._hasna:
            raise NotImplementedError
        return nargminmax(self, "argmin")

    def argmax(self, skipna: bool = True) -> int:
        """
        Return the index of maximum value.

        In case of multiple occurrences of the maximum value, the index
        corresponding to the first occurrence is returned.

        Parameters
        ----------
        skipna : bool, default True

        Returns
        -------
        int

        See Also
        --------
        ExtensionArray.argmin : Return the index of the minimum value.

        Examples
        --------
        >>> arr = pd.array([3, 1, 2, 5, 4])
        >>> arr.argmax()
        3
        """
        # Implementer note: You have two places to override the behavior of
        # argmax.
        # 1. _values_for_argsort : construct the values used in nargminmax
        # 2. argmax itself : total control over sorting.
        validate_bool_kwarg(skipna, "skipna")
        if not skipna and self._hasna:
            raise NotImplementedError
        return nargminmax(self, "argmax")

    def interpolate(
        self,
        *,
        method: InterpolateOptions,
        axis: int,
        index: Index,
        limit,
        limit_direction,
        limit_area,
        copy: bool,
        **kwargs,
    ) -> Self:
        """
        See DataFrame.interpolate.__doc__.

        Examples
        --------
        >>> arr = pd.arrays.NumpyExtensionArray(np.array([0, 1, np.nan, 3]))
        >>> arr.interpolate(method="linear",
        ...                 limit=3,
        ...                 limit_direction="forward",
        ...                 index=pd.Index([1, 2, 3, 4]),
        ...                 fill_value=1,
        ...                 copy=False,
        ...                 axis=0,
        ...                 limit_area="inside"
        ...                 )
        <NumpyExtensionArray>
        [0.0, 1.0, 2.0, 3.0]
        Length: 4, dtype: float64
        """
        # NB: we return type(self) even if copy=False
        raise NotImplementedError(
            f"{type(self).__name__} does not implement interpolate"
        )

    def _pad_or_backfill(
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        """
        Pad or backfill values, used by Series/DataFrame ffill and bfill.

        Parameters
        ----------
        method : {'backfill', 'bfill', 'pad', 'ffill'}
            Method to use for filling holes in reindexed Series:

            * pad / ffill: propagate last valid observation forward to next valid.
            * backfill / bfill: use NEXT valid observation to fill gap.

        limit : int, default None
            This is the maximum number of consecutive
            NaN values to forward/backward fill. In other words, if there is
            a gap with more than this number of consecutive NaNs, it will only
            be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled.

        copy : bool, default True
            Whether to make a copy of the data before filling. If False, then
            the original should be modified and no new memory should be allocated.
            For ExtensionArray subclasses that cannot do this, it is at the
            author's discretion whether to ignore "copy=False" or to raise.
            The base class implementation ignores the keyword if any NAs are
            present.

        Returns
        -------
        Same type as self

        Examples
        --------
        >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
        >>> arr._pad_or_backfill(method="backfill", limit=1)
        <IntegerArray>
        [<NA>, 2, 2, 3, <NA>, <NA>]
        Length: 6, dtype: Int64
        """

        # If a 3rd-party EA has implemented this functionality in fillna,
        # we warn that they need to implement _pad_or_backfill instead.
        if (
            type(self).fillna is not ExtensionArray.fillna
            and type(self)._pad_or_backfill is ExtensionArray._pad_or_backfill
        ):
            # Check for _pad_or_backfill here allows us to call
            # super()._pad_or_backfill without getting this warning
            warnings.warn(
                "ExtensionArray.fillna 'method' keyword is deprecated. "
                "In a future version, arr._pad_or_backfill will be called "
                "instead. 3rd-party ExtensionArray authors need to implement "
                "_pad_or_backfill.",
                DeprecationWarning,
                stacklevel=find_stack_level(),
            )
            if limit_area is not None:
                raise NotImplementedError(
                    f"{type(self).__name__} does not implement limit_area "
                    "(added in pandas 2.2). 3rd-party ExtensionArray authors "
                    "need to add this argument to _pad_or_backfill."
                )
            return self.fillna(method=method, limit=limit)

        mask = self.isna()

        if mask.any():
            # NB: the base class does not respect the "copy" keyword
            meth = missing.clean_fill_method(method)

            npmask = np.asarray(mask)
            if limit_area is not None and not npmask.all():
                _fill_limit_area_1d(npmask, limit_area)
            if meth == "pad":
                indexer = libalgos.get_fill_indexer(npmask, limit=limit)
                return self.take(indexer, allow_fill=True)
            else:
                # i.e. meth == "backfill"
                indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1]
                return self[::-1].take(indexer, allow_fill=True)

        else:
            if not copy:
                return self
            new_values = self.copy()
            return new_values

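    # The backfill branch above reuses the forward-fill indexer by running
    # it over the reversed mask. A numpy-only illustration of the same
    # trick, assuming a hypothetical ``ffill_indexer`` standing in for
    # ``libalgos.get_fill_indexer`` (no ``limit`` handling):
    #
    #   def ffill_indexer(mask):
    #       # index of the most recent non-missing position, or -1 if none
    #       idx = np.where(~mask, np.arange(len(mask)), -1)
    #       return np.maximum.accumulate(idx)
    #
    #   mask = np.array([True, False, True, True, False])
    #   ffill_indexer(mask)                              # [-1, 1, 1, 1, 4]
    #   len(mask) - 1 - ffill_indexer(mask[::-1])[::-1]  # [1, 1, 4, 4, 4]
    #
    # The second expression is the backfill indexer in original coordinates
    # (positions with no later valid value would map to len(mask), i.e.
    # stay missing).
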
    def fillna(
        self,
        value: object | ArrayLike | None = None,
        method: FillnaOptions | None = None,
        limit: int | None = None,
        copy: bool = True,
    ) -> Self:
        """
        Fill NA/NaN values using the specified method.

        Parameters
        ----------
        value : scalar, array-like
            If a scalar value is passed it is used to fill all missing values.
            Alternatively, an array-like "value" can be given. It's expected
            that the array-like have the same length as 'self'.
        method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series:

            * pad / ffill: propagate last valid observation forward to next valid.
            * backfill / bfill: use NEXT valid observation to fill gap.

            .. deprecated:: 2.1.0

        limit : int, default None
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill. In other words, if there is
            a gap with more than this number of consecutive NaNs, it will only
            be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled.

            .. deprecated:: 2.1.0

        copy : bool, default True
            Whether to make a copy of the data before filling. If False, then
            the original should be modified and no new memory should be allocated.
            For ExtensionArray subclasses that cannot do this, it is at the
            author's discretion whether to ignore "copy=False" or to raise.
            The base class implementation ignores the keyword in pad/backfill
            cases.

        Returns
        -------
        ExtensionArray
            With NA/NaN filled.

        Examples
        --------
        >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
        >>> arr.fillna(0)
        <IntegerArray>
        [0, 0, 2, 3, 0, 0]
        Length: 6, dtype: Int64
        """
        if method is not None:
            warnings.warn(
                f"The 'method' keyword in {type(self).__name__}.fillna is "
                "deprecated and will be removed in a future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        value, method = validate_fillna_kwargs(value, method)

        mask = self.isna()
        # error: Argument 2 to "check_value_size" has incompatible type
        # "ExtensionArray"; expected "ndarray"
        value = missing.check_value_size(
            value, mask, len(self)  # type: ignore[arg-type]
        )

        if mask.any():
            if method is not None:
                meth = missing.clean_fill_method(method)

                npmask = np.asarray(mask)
                if meth == "pad":
                    indexer = libalgos.get_fill_indexer(npmask, limit=limit)
                    return self.take(indexer, allow_fill=True)
                else:
                    # i.e. meth == "backfill"
                    indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1]
                    return self[::-1].take(indexer, allow_fill=True)
            else:
                # fill with value
                if not copy:
                    new_values = self[:]
                else:
                    new_values = self.copy()
                new_values[mask] = value
        else:
            if not copy:
                new_values = self[:]
            else:
                new_values = self.copy()
        return new_values

    def dropna(self) -> Self:
        """
        Return ExtensionArray without NA values.

        Returns
        -------
        ExtensionArray

        Examples
        --------
        >>> pd.array([1, 2, np.nan]).dropna()
        <IntegerArray>
        [1, 2]
        Length: 2, dtype: Int64
        """
        # error: Unsupported operand type for ~ ("ExtensionArray")
        return self[~self.isna()]  # type: ignore[operator]

    def duplicated(
        self, keep: Literal["first", "last", False] = "first"
    ) -> npt.NDArray[np.bool_]:
        """
        Return boolean ndarray denoting duplicate values.

        Parameters
        ----------
        keep : {'first', 'last', False}, default 'first'
            - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
            - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
            - False : Mark all duplicates as ``True``.

        Returns
        -------
        ndarray[bool]

        Examples
        --------
        >>> pd.array([1, 1, 2, 3, 3], dtype="Int64").duplicated()
        array([False,  True, False, False,  True])
        """
        mask = self.isna().astype(np.bool_, copy=False)
        return duplicated(values=self, keep=keep, mask=mask)

    def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray:
        """
        Shift values by desired number.

        Newly introduced missing values are filled with
        ``self.dtype.na_value``.

        Parameters
        ----------
        periods : int, default 1
            The number of periods to shift. Negative values are allowed
            for shifting backwards.

        fill_value : object, optional
            The scalar value to use for newly introduced missing values.
            The default is ``self.dtype.na_value``.

        Returns
        -------
        ExtensionArray
            Shifted.

        Notes
        -----
        If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
        returned.

        If ``periods > len(self)``, then an array of size
        len(self) is returned, with all values filled with
        ``self.dtype.na_value``.

        For 2-dimensional ExtensionArrays, we are always shifting along axis=0.

        Examples
        --------
        >>> arr = pd.array([1, 2, 3])
        >>> arr.shift(2)
        <IntegerArray>
        [<NA>, <NA>, 1]
        Length: 3, dtype: Int64
        """
        # Note: this implementation assumes that `self.dtype.na_value` can be
        # stored in an instance of your ExtensionArray with `self.dtype`.
        if not len(self) or periods == 0:
            return self.copy()

        if isna(fill_value):
            fill_value = self.dtype.na_value

        empty = self._from_sequence(
            [fill_value] * min(abs(periods), len(self)), dtype=self.dtype
        )
        if periods > 0:
            a = empty
            b = self[:-periods]
        else:
            a = self[abs(periods) :]
            b = empty
        return self._concat_same_type([a, b])

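    # A worked instance of the construction above for a negative shift: the
    # kept slice ``self[abs(periods):]`` comes first and the fill block of
    # ``min(abs(periods), len(self))`` NA values is appended.
    #
    #   >>> pd.array([1, 2, 3]).shift(-2)
    #   <IntegerArray>
    #   [3, <NA>, <NA>]
    #   Length: 3, dtype: Int64
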
    def unique(self) -> Self:
        """
        Compute the ExtensionArray of unique values.

        Returns
        -------
        pandas.api.extensions.ExtensionArray

        Examples
        --------
        >>> arr = pd.array([1, 2, 3, 1, 2, 3])
        >>> arr.unique()
        <IntegerArray>
        [1, 2, 3]
        Length: 3, dtype: Int64
        """
        uniques = unique(self.astype(object))
        return self._from_sequence(uniques, dtype=self.dtype)

    def searchsorted(
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter | None = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        """
        Find indices where elements should be inserted to maintain order.

        Find the indices into a sorted array `self` (a) such that, if the
        corresponding elements in `value` were inserted before the indices,
        the order of `self` would be preserved.

        Assuming that `self` is sorted:

        ======  ================================
        `side`  returned index `i` satisfies
        ======  ================================
        left    ``self[i-1] < value <= self[i]``
        right   ``self[i-1] <= value < self[i]``
        ======  ================================

        Parameters
        ----------
        value : array-like, list or scalar
            Value(s) to insert into `self`.
        side : {'left', 'right'}, optional
            If 'left', the index of the first suitable location found is given.
            If 'right', return the last such index. If there is no suitable
            index, return either 0 or N (where N is the length of `self`).
        sorter : 1-D array-like, optional
            Optional array of integer indices that sort array a into ascending
            order. They are typically the result of argsort.

        Returns
        -------
        array of ints or int
            If value is array-like, array of insertion points.
            If value is scalar, a single integer.

        See Also
        --------
        numpy.searchsorted : Similar method from NumPy.

        Examples
        --------
        >>> arr = pd.array([1, 2, 3, 5])
        >>> arr.searchsorted([4])
        array([3])
        """
        # Note: the base tests provided by pandas only test the basics.
        # We do not test
        # 1. Values outside the range of the `data_for_sorting` fixture
        # 2. Values between the values in the `data_for_sorting` fixture
        # 3. Missing values.
        arr = self.astype(object)
        if isinstance(value, ExtensionArray):
            value = value.astype(object)
        return arr.searchsorted(value, side=side, sorter=sorter)

    def equals(self, other: object) -> bool:
        """
        Return if another array is equivalent to this array.

        Equivalent means that both arrays have the same shape and dtype, and
        all values compare equal. Missing values in the same location are
        considered equal (in contrast with normal equality).

        Parameters
        ----------
        other : ExtensionArray
            Array to compare to this Array.

        Returns
        -------
        boolean
            Whether the arrays are equivalent.

        Examples
        --------
        >>> arr1 = pd.array([1, 2, np.nan])
        >>> arr2 = pd.array([1, 2, np.nan])
        >>> arr1.equals(arr2)
        True
        """
        if type(self) != type(other):
            return False
        other = cast(ExtensionArray, other)
        if self.dtype != other.dtype:
            return False
        elif len(self) != len(other):
            return False
        else:
            equal_values = self == other
            if isinstance(equal_values, ExtensionArray):
                # boolean array with NA -> fill with False
                equal_values = equal_values.fillna(False)
            # error: Unsupported left operand type for & ("ExtensionArray")
            equal_na = self.isna() & other.isna()  # type: ignore[operator]
            return bool((equal_values | equal_na).all())

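    # Why the ``equal_na`` term above matters: with nullable dtypes,
    # ``NA == NA`` evaluates to ``NA``, which ``fillna(False)`` turns into
    # ``False``; positions where *both* sides are missing are then
    # re-admitted via ``self.isna() & other.isna()``. A quick check:
    #
    #   >>> a = pd.array([1, pd.NA])
    #   >>> (a == a).fillna(False)
    #   <BooleanArray>
    #   [True, False]
    #   Length: 2, dtype: boolean
    #   >>> a.equals(a)
    #   True
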
    def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
        """
        Pointwise comparison for set containment in the given values.

        Roughly equivalent to `np.array([x in values for x in self])`

        Parameters
        ----------
        values : np.ndarray or ExtensionArray

        Returns
        -------
        np.ndarray[bool]

        Examples
        --------
        >>> arr = pd.array([1, 2, 3])
        >>> arr.isin([1])
        <BooleanArray>
        [True, False, False]
        Length: 3, dtype: boolean
        """
        return isin(np.asarray(self), values)

    def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
        """
        Return an array and missing value suitable for factorization.

        Returns
        -------
        values : ndarray
            An array suitable for factorization. This should maintain order
            and be a supported dtype (Float64, Int64, UInt64, String, Object).
            By default, the extension array is cast to object dtype.
        na_value : object
            The value in `values` to consider missing. This will be treated
            as NA in the factorization routines, so it will be coded as
            `-1` and not included in `uniques`. By default,
            ``np.nan`` is used.

        Notes
        -----
        The values returned by this method are also used in
        :func:`pandas.util.hash_pandas_object`. If needed, this can be
        overridden in the ``self._hash_pandas_object()`` method.

        Examples
        --------
        >>> pd.array([1, 2, 3])._values_for_factorize()
        (array([1, 2, 3], dtype=object), nan)
        """
        return self.astype(object), np.nan

    def factorize(
        self,
        use_na_sentinel: bool = True,
    ) -> tuple[np.ndarray, ExtensionArray]:
        """
        Encode the extension array as an enumerated type.

        Parameters
        ----------
        use_na_sentinel : bool, default True
            If True, the sentinel -1 will be used for NaN values. If False,
            NaN values will be encoded as non-negative integers and will not drop the
            NaN from the uniques of the values.

            .. versionadded:: 1.5.0

        Returns
        -------
        codes : ndarray
            An integer NumPy array that's an indexer into the original
            ExtensionArray.
        uniques : ExtensionArray
            An ExtensionArray containing the unique values of `self`.

            .. note::

               uniques will *not* contain an entry for the NA value of
               the ExtensionArray if there are any missing values present
               in `self`.

        See Also
        --------
        factorize : Top-level factorize method that dispatches here.

        Notes
        -----
        :meth:`pandas.factorize` offers a `sort` keyword as well.

        Examples
        --------
        >>> idx1 = pd.PeriodIndex(["2014-01", "2014-01", "2014-02", "2014-02",
        ...                        "2014-03", "2014-03"], freq="M")
        >>> arr, idx = idx1.factorize()
        >>> arr
        array([0, 0, 1, 1, 2, 2])
        >>> idx
        PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]')
        """
        # Implementer note: There are two ways to override the behavior of
        # pandas.factorize
        # 1. _values_for_factorize and _from_factorized.
        #    Specify the values passed to pandas' internal factorization
        #    routines, and how to convert from those values back to the
        #    original ExtensionArray.
        # 2. ExtensionArray.factorize.
        #    Complete control over factorization.
        arr, na_value = self._values_for_factorize()

        codes, uniques = factorize_array(
            arr, use_na_sentinel=use_na_sentinel, na_value=na_value
        )

        uniques_ea = self._from_factorized(uniques, self)
        return codes, uniques_ea

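    # A minimal sketch of the hook pairing used above: a hypothetical
    # ``DecimalArray`` storing Python Decimals in ``self._data`` (both names
    # illustrative) can factorize via object dtype by implementing only the
    # two methods that ``factorize`` calls:
    #
    #   def _values_for_factorize(self):
    #       # object ndarray plus the sentinel that marks missing entries
    #       return np.asarray(self._data, dtype=object), np.nan
    #
    #   @classmethod
    #   def _from_factorized(cls, values, original):
    #       # ``values`` are the uniques produced by factorize_array
    #       return cls(values.tolist())
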
    _extension_array_shared_docs[
        "repeat"
    ] = """
        Repeat elements of a %(klass)s.

        Returns a new %(klass)s where each element of the current %(klass)s
        is repeated consecutively a given number of times.

        Parameters
        ----------
        repeats : int or array of ints
            The number of repetitions for each element. This should be a
            non-negative integer. Repeating 0 times will return an empty
            %(klass)s.
        axis : None
            Must be ``None``. Has no effect but is accepted for compatibility
            with numpy.

        Returns
        -------
        %(klass)s
            Newly created %(klass)s with repeated elements.

        See Also
        --------
        Series.repeat : Equivalent function for Series.
        Index.repeat : Equivalent function for Index.
        numpy.repeat : Similar method for :class:`numpy.ndarray`.
        ExtensionArray.take : Take arbitrary positions.

        Examples
        --------
        >>> cat = pd.Categorical(['a', 'b', 'c'])
        >>> cat
        ['a', 'b', 'c']
        Categories (3, object): ['a', 'b', 'c']
        >>> cat.repeat(2)
        ['a', 'a', 'b', 'b', 'c', 'c']
        Categories (3, object): ['a', 'b', 'c']
        >>> cat.repeat([1, 2, 3])
        ['a', 'b', 'b', 'c', 'c', 'c']
        Categories (3, object): ['a', 'b', 'c']
        """

    @Substitution(klass="ExtensionArray")
    @Appender(_extension_array_shared_docs["repeat"])
    def repeat(self, repeats: int | Sequence[int], axis: AxisInt | None = None) -> Self:
        nv.validate_repeat((), {"axis": axis})
        ind = np.arange(len(self)).repeat(repeats)
        return self.take(ind)

    # ------------------------------------------------------------------------
    # Indexing methods
    # ------------------------------------------------------------------------

    def take(
        self,
        indices: TakeIndexer,
        *,
        allow_fill: bool = False,
        fill_value: Any = None,
    ) -> Self:
        """
        Take elements from an array.

        Parameters
        ----------
        indices : sequence of int or one-dimensional np.ndarray of int
            Indices to be taken.
        allow_fill : bool, default False
            How to handle negative values in `indices`.

            * False: negative values in `indices` indicate positional indices
              from the right (the default). This is similar to
              :func:`numpy.take`.

            * True: negative values in `indices` indicate
              missing values. These values are set to `fill_value`. Any
              other negative values raise a ``ValueError``.

        fill_value : any, optional
            Fill value to use for NA-indices when `allow_fill` is True.
            This may be ``None``, in which case the default NA value for
            the type, ``self.dtype.na_value``, is used.

            For many ExtensionArrays, there will be two representations of
            `fill_value`: a user-facing "boxed" scalar, and a low-level
            physical NA value. `fill_value` should be the user-facing version,
            and the implementation should handle translating that to the
            physical version for processing the take if necessary.

        Returns
        -------
        ExtensionArray

        Raises
        ------
        IndexError
            When the indices are out of bounds for the array.
        ValueError
            When `indices` contains negative values other than ``-1``
            and `allow_fill` is True.

        See Also
        --------
        numpy.take : Take elements from an array along an axis.
        api.extensions.take : Take elements from an array.

        Notes
        -----
        ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
        ``iloc``, when `indices` is a sequence of values. Additionally,
        it's called by :meth:`Series.reindex`, or any other method
        that causes realignment, with a `fill_value`.

        Examples
        --------
        Here's an example implementation, which relies on casting the
        extension array to object dtype. This uses the helper method
        :func:`pandas.api.extensions.take`.

        .. code-block:: python

           def take(self, indices, allow_fill=False, fill_value=None):
               from pandas.core.algorithms import take

               # If the ExtensionArray is backed by an ndarray, then
               # just pass that here instead of coercing to object.
               data = self.astype(object)

               if allow_fill and fill_value is None:
                   fill_value = self.dtype.na_value

               # fill value should always be translated from the scalar
               # type for the array, to the physical storage type for
               # the data, before passing to take.

               result = take(data, indices, fill_value=fill_value,
                             allow_fill=allow_fill)
               return self._from_sequence(result, dtype=self.dtype)
        """
        # Implementer note: The `fill_value` parameter should be a user-facing
        # value, an instance of self.dtype.type. When passed `fill_value=None`,
        # the default of `self.dtype.na_value` should be used.
        # This may differ from the physical storage type your ExtensionArray
        # uses. In this case, your implementation is responsible for casting
        # the user-facing type to the storage type, before using
        # pandas.api.extensions.take
        raise AbstractMethodError(self)

    def copy(self) -> Self:
        """
        Return a copy of the array.

        Returns
        -------
        ExtensionArray

        Examples
        --------
        >>> arr = pd.array([1, 2, 3])
        >>> arr2 = arr.copy()
        >>> arr[0] = 2
        >>> arr2
        <IntegerArray>
        [1, 2, 3]
        Length: 3, dtype: Int64
        """
        raise AbstractMethodError(self)

    def view(self, dtype: Dtype | None = None) -> ArrayLike:
        """
        Return a view on the array.

        Parameters
        ----------
        dtype : str, np.dtype, or ExtensionDtype, optional
            Default None.

        Returns
        -------
        ExtensionArray or np.ndarray
            A view on the :class:`ExtensionArray`'s data.

        Examples
        --------
        This gives a view on the underlying data of an ``ExtensionArray`` and is not a
        copy. Modifications on either the view or the original ``ExtensionArray``
        will be reflected on the underlying data:

        >>> arr = pd.array([1, 2, 3])
        >>> arr2 = arr.view()
        >>> arr[0] = 2
        >>> arr2
        <IntegerArray>
        [2, 2, 3]
        Length: 3, dtype: Int64
        """
        # NB:
        # - This must return a *new* object referencing the same data, not self.
        # - The only case that *must* be implemented is with dtype=None,
        #   giving a view with the same dtype as self.
        if dtype is not None:
            raise NotImplementedError(dtype)
+
return self[:]
|
1698 |
+
|
1699 |
+
# ------------------------------------------------------------------------
|
1700 |
+
# Printing
|
1701 |
+
# ------------------------------------------------------------------------
|
1702 |
+
|
1703 |
+
def __repr__(self) -> str:
|
1704 |
+
if self.ndim > 1:
|
1705 |
+
return self._repr_2d()
|
1706 |
+
|
1707 |
+
from pandas.io.formats.printing import format_object_summary
|
1708 |
+
|
1709 |
+
# the short repr has no trailing newline, while the truncated
|
1710 |
+
# repr does. So we include a newline in our template, and strip
|
1711 |
+
# any trailing newlines from format_object_summary
|
1712 |
+
data = format_object_summary(
|
1713 |
+
self, self._formatter(), indent_for_name=False
|
1714 |
+
).rstrip(", \n")
|
1715 |
+
class_name = f"<{type(self).__name__}>\n"
|
1716 |
+
footer = self._get_repr_footer()
|
1717 |
+
return f"{class_name}{data}\n{footer}"
|
1718 |
+
|
1719 |
+
def _get_repr_footer(self) -> str:
|
1720 |
+
# GH#24278
|
1721 |
+
if self.ndim > 1:
|
1722 |
+
return f"Shape: {self.shape}, dtype: {self.dtype}"
|
1723 |
+
return f"Length: {len(self)}, dtype: {self.dtype}"
|
1724 |
+
|
1725 |
+
def _repr_2d(self) -> str:
|
1726 |
+
from pandas.io.formats.printing import format_object_summary
|
1727 |
+
|
1728 |
+
# the short repr has no trailing newline, while the truncated
|
1729 |
+
# repr does. So we include a newline in our template, and strip
|
1730 |
+
# any trailing newlines from format_object_summary
|
1731 |
+
lines = [
|
1732 |
+
format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
|
1733 |
+
", \n"
|
1734 |
+
)
|
1735 |
+
for x in self
|
1736 |
+
]
|
1737 |
+
data = ",\n".join(lines)
|
1738 |
+
class_name = f"<{type(self).__name__}>"
|
1739 |
+
footer = self._get_repr_footer()
|
1740 |
+
return f"{class_name}\n[\n{data}\n]\n{footer}"
|
1741 |
+
|
1742 |
+
def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
|
1743 |
+
"""
|
1744 |
+
Formatting function for scalar values.
|
1745 |
+
|
1746 |
+
This is used in the default '__repr__'. The returned formatting
|
1747 |
+
function receives instances of your scalar type.
|
1748 |
+
|
1749 |
+
Parameters
|
1750 |
+
----------
|
1751 |
+
boxed : bool, default False
|
1752 |
+
An indicated for whether or not your array is being printed
|
1753 |
+
within a Series, DataFrame, or Index (True), or just by
|
1754 |
+
itself (False). This may be useful if you want scalar values
|
1755 |
+
to appear differently within a Series versus on its own (e.g.
|
1756 |
+
quoted or not).
|
1757 |
+
|
1758 |
+
Returns
|
1759 |
+
-------
|
1760 |
+
Callable[[Any], str]
|
1761 |
+
A callable that gets instances of the scalar type and
|
1762 |
+
returns a string. By default, :func:`repr` is used
|
1763 |
+
when ``boxed=False`` and :func:`str` is used when
|
1764 |
+
``boxed=True``.
|
1765 |
+
|
1766 |
+
Examples
|
1767 |
+
--------
|
1768 |
+
>>> class MyExtensionArray(pd.arrays.NumpyExtensionArray):
|
1769 |
+
... def _formatter(self, boxed=False):
|
1770 |
+
... return lambda x: '*' + str(x) + '*' if boxed else repr(x) + '*'
|
1771 |
+
>>> MyExtensionArray(np.array([1, 2, 3, 4]))
|
1772 |
+
<MyExtensionArray>
|
1773 |
+
[1*, 2*, 3*, 4*]
|
1774 |
+
Length: 4, dtype: int64
|
1775 |
+
"""
|
1776 |
+
if boxed:
|
1777 |
+
return str
|
1778 |
+
return repr
|
1779 |
+
|
1780 |
+
# ------------------------------------------------------------------------
|
1781 |
+
# Reshaping
|
1782 |
+
# ------------------------------------------------------------------------
|
1783 |
+
|
1784 |
+
def transpose(self, *axes: int) -> ExtensionArray:
|
1785 |
+
"""
|
1786 |
+
Return a transposed view on this array.
|
1787 |
+
|
1788 |
+
Because ExtensionArrays are always 1D, this is a no-op. It is included
|
1789 |
+
for compatibility with np.ndarray.
|
1790 |
+
|
1791 |
+
Returns
|
1792 |
+
-------
|
1793 |
+
ExtensionArray
|
1794 |
+
|
1795 |
+
Examples
|
1796 |
+
--------
|
1797 |
+
>>> pd.array([1, 2, 3]).transpose()
|
1798 |
+
<IntegerArray>
|
1799 |
+
[1, 2, 3]
|
1800 |
+
Length: 3, dtype: Int64
|
1801 |
+
"""
|
1802 |
+
return self[:]
|
1803 |
+
|
1804 |
+
@property
|
1805 |
+
def T(self) -> ExtensionArray:
|
1806 |
+
return self.transpose()
|
1807 |
+
|
1808 |
+
def ravel(self, order: Literal["C", "F", "A", "K"] | None = "C") -> ExtensionArray:
|
1809 |
+
"""
|
1810 |
+
Return a flattened view on this array.
|
1811 |
+
|
1812 |
+
Parameters
|
1813 |
+
----------
|
1814 |
+
order : {None, 'C', 'F', 'A', 'K'}, default 'C'
|
1815 |
+
|
1816 |
+
Returns
|
1817 |
+
-------
|
1818 |
+
ExtensionArray
|
1819 |
+
|
1820 |
+
Notes
|
1821 |
+
-----
|
1822 |
+
- Because ExtensionArrays are 1D-only, this is a no-op.
|
1823 |
+
- The "order" argument is ignored, is for compatibility with NumPy.
|
1824 |
+
|
1825 |
+
Examples
|
1826 |
+
--------
|
1827 |
+
>>> pd.array([1, 2, 3]).ravel()
|
1828 |
+
<IntegerArray>
|
1829 |
+
[1, 2, 3]
|
1830 |
+
Length: 3, dtype: Int64
|
1831 |
+
"""
|
1832 |
+
return self
|
1833 |
+
|
1834 |
+
@classmethod
|
1835 |
+
def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:
|
1836 |
+
"""
|
1837 |
+
Concatenate multiple array of this dtype.
|
1838 |
+
|
1839 |
+
Parameters
|
1840 |
+
----------
|
1841 |
+
to_concat : sequence of this type
|
1842 |
+
|
1843 |
+
Returns
|
1844 |
+
-------
|
1845 |
+
ExtensionArray
|
1846 |
+
|
1847 |
+
Examples
|
1848 |
+
--------
|
1849 |
+
>>> arr1 = pd.array([1, 2, 3])
|
1850 |
+
>>> arr2 = pd.array([4, 5, 6])
|
1851 |
+
>>> pd.arrays.IntegerArray._concat_same_type([arr1, arr2])
|
1852 |
+
<IntegerArray>
|
1853 |
+
[1, 2, 3, 4, 5, 6]
|
1854 |
+
Length: 6, dtype: Int64
|
1855 |
+
"""
|
1856 |
+
# Implementer note: this method will only be called with a sequence of
|
1857 |
+
# ExtensionArrays of this class and with the same dtype as self. This
|
1858 |
+
# should allow "easy" concatenation (no upcasting needed), and result
|
1859 |
+
# in a new ExtensionArray of the same dtype.
|
1860 |
+
# Note: this strict behaviour is only guaranteed starting with pandas 1.1
|
1861 |
+
raise AbstractMethodError(cls)
|
1862 |
+
|
1863 |
+
# The _can_hold_na attribute is set to True so that pandas internals
|
1864 |
+
# will use the ExtensionDtype.na_value as the NA value in operations
|
1865 |
+
# such as take(), reindex(), shift(), etc. In addition, those results
|
1866 |
+
# will then be of the ExtensionArray subclass rather than an array
|
1867 |
+
# of objects
|
1868 |
+
@cache_readonly
|
1869 |
+
def _can_hold_na(self) -> bool:
|
1870 |
+
return self.dtype._can_hold_na
|
1871 |
+
|
1872 |
+
def _accumulate(
|
1873 |
+
self, name: str, *, skipna: bool = True, **kwargs
|
1874 |
+
) -> ExtensionArray:
|
1875 |
+
"""
|
1876 |
+
Return an ExtensionArray performing an accumulation operation.
|
1877 |
+
|
1878 |
+
The underlying data type might change.
|
1879 |
+
|
1880 |
+
Parameters
|
1881 |
+
----------
|
1882 |
+
name : str
|
1883 |
+
Name of the function, supported values are:
|
1884 |
+
- cummin
|
1885 |
+
- cummax
|
1886 |
+
- cumsum
|
1887 |
+
- cumprod
|
1888 |
+
skipna : bool, default True
|
1889 |
+
If True, skip NA values.
|
1890 |
+
**kwargs
|
1891 |
+
Additional keyword arguments passed to the accumulation function.
|
1892 |
+
Currently, there is no supported kwarg.
|
1893 |
+
|
1894 |
+
Returns
|
1895 |
+
-------
|
1896 |
+
array
|
1897 |
+
|
1898 |
+
Raises
|
1899 |
+
------
|
1900 |
+
NotImplementedError : subclass does not define accumulations
|
1901 |
+
|
1902 |
+
Examples
|
1903 |
+
--------
|
1904 |
+
>>> arr = pd.array([1, 2, 3])
|
1905 |
+
>>> arr._accumulate(name='cumsum')
|
1906 |
+
<IntegerArray>
|
1907 |
+
[1, 3, 6]
|
1908 |
+
Length: 3, dtype: Int64
|
1909 |
+
"""
|
1910 |
+
raise NotImplementedError(f"cannot perform {name} with type {self.dtype}")
|
1911 |
+
|
1912 |
+
def _reduce(
|
1913 |
+
self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
|
1914 |
+
):
|
1915 |
+
"""
|
1916 |
+
Return a scalar result of performing the reduction operation.
|
1917 |
+
|
1918 |
+
Parameters
|
1919 |
+
----------
|
1920 |
+
name : str
|
1921 |
+
Name of the function, supported values are:
|
1922 |
+
{ any, all, min, max, sum, mean, median, prod,
|
1923 |
+
std, var, sem, kurt, skew }.
|
1924 |
+
skipna : bool, default True
|
1925 |
+
If True, skip NaN values.
|
1926 |
+
keepdims : bool, default False
|
1927 |
+
If False, a scalar is returned.
|
1928 |
+
If True, the result has dimension with size one along the reduced axis.
|
1929 |
+
|
1930 |
+
.. versionadded:: 2.1
|
1931 |
+
|
1932 |
+
This parameter is not required in the _reduce signature to keep backward
|
1933 |
+
compatibility, but will become required in the future. If the parameter
|
1934 |
+
is not found in the method signature, a FutureWarning will be emitted.
|
1935 |
+
**kwargs
|
1936 |
+
Additional keyword arguments passed to the reduction function.
|
1937 |
+
Currently, `ddof` is the only supported kwarg.
|
1938 |
+
|
1939 |
+
Returns
|
1940 |
+
-------
|
1941 |
+
scalar
|
1942 |
+
|
1943 |
+
Raises
|
1944 |
+
------
|
1945 |
+
TypeError : subclass does not define reductions
|
1946 |
+
|
1947 |
+
Examples
|
1948 |
+
--------
|
1949 |
+
>>> pd.array([1, 2, 3])._reduce("min")
|
1950 |
+
1
|
1951 |
+
"""
|
1952 |
+
meth = getattr(self, name, None)
|
1953 |
+
if meth is None:
|
1954 |
+
raise TypeError(
|
1955 |
+
f"'{type(self).__name__}' with dtype {self.dtype} "
|
1956 |
+
f"does not support reduction '{name}'"
|
1957 |
+
)
|
1958 |
+
result = meth(skipna=skipna, **kwargs)
|
1959 |
+
if keepdims:
|
1960 |
+
result = np.array([result])
|
1961 |
+
|
1962 |
+
return result
|
1963 |
+
|
1964 |
+
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
|
1965 |
+
# Incompatible types in assignment (expression has type "None", base class
|
1966 |
+
# "object" defined the type as "Callable[[object], int]")
|
1967 |
+
__hash__: ClassVar[None] # type: ignore[assignment]
|
1968 |
+
|
1969 |
+
# ------------------------------------------------------------------------
|
1970 |
+
# Non-Optimized Default Methods; in the case of the private methods here,
|
1971 |
+
# these are not guaranteed to be stable across pandas versions.
|
1972 |
+
|
1973 |
+
def _values_for_json(self) -> np.ndarray:
|
1974 |
+
"""
|
1975 |
+
Specify how to render our entries in to_json.
|
1976 |
+
|
1977 |
+
Notes
|
1978 |
+
-----
|
1979 |
+
The dtype on the returned ndarray is not restricted, but for non-native
|
1980 |
+
types that are not specifically handled in objToJSON.c, to_json is
|
1981 |
+
liable to raise. In these cases, it may be safer to return an ndarray
|
1982 |
+
of strings.
|
1983 |
+
"""
|
1984 |
+
return np.asarray(self)
|
1985 |
+
|
1986 |
+
def _hash_pandas_object(
|
1987 |
+
self, *, encoding: str, hash_key: str, categorize: bool
|
1988 |
+
) -> npt.NDArray[np.uint64]:
|
1989 |
+
"""
|
1990 |
+
Hook for hash_pandas_object.
|
1991 |
+
|
1992 |
+
Default is to use the values returned by _values_for_factorize.
|
1993 |
+
|
1994 |
+
Parameters
|
1995 |
+
----------
|
1996 |
+
encoding : str
|
1997 |
+
Encoding for data & key when strings.
|
1998 |
+
hash_key : str
|
1999 |
+
Hash_key for string key to encode.
|
2000 |
+
categorize : bool
|
2001 |
+
Whether to first categorize object arrays before hashing. This is more
|
2002 |
+
efficient when the array contains duplicate values.
|
2003 |
+
|
2004 |
+
Returns
|
2005 |
+
-------
|
2006 |
+
np.ndarray[uint64]
|
2007 |
+
|
2008 |
+
Examples
|
2009 |
+
--------
|
2010 |
+
>>> pd.array([1, 2])._hash_pandas_object(encoding='utf-8',
|
2011 |
+
... hash_key="1000000000000000",
|
2012 |
+
... categorize=False
|
2013 |
+
... )
|
2014 |
+
array([ 6238072747940578789, 15839785061582574730], dtype=uint64)
|
2015 |
+
"""
|
2016 |
+
from pandas.core.util.hashing import hash_array
|
2017 |
+
|
2018 |
+
values, _ = self._values_for_factorize()
|
2019 |
+
return hash_array(
|
2020 |
+
values, encoding=encoding, hash_key=hash_key, categorize=categorize
|
2021 |
+
)
|
2022 |
+
|
2023 |
+
def _explode(self) -> tuple[Self, npt.NDArray[np.uint64]]:
|
2024 |
+
"""
|
2025 |
+
Transform each element of list-like to a row.
|
2026 |
+
|
2027 |
+
For arrays that do not contain list-like elements the default
|
2028 |
+
implementation of this method just returns a copy and an array
|
2029 |
+
of ones (unchanged index).
|
2030 |
+
|
2031 |
+
Returns
|
2032 |
+
-------
|
2033 |
+
ExtensionArray
|
2034 |
+
Array with the exploded values.
|
2035 |
+
np.ndarray[uint64]
|
2036 |
+
The original lengths of each list-like for determining the
|
2037 |
+
resulting index.
|
2038 |
+
|
2039 |
+
See Also
|
2040 |
+
--------
|
2041 |
+
Series.explode : The method on the ``Series`` object that this
|
2042 |
+
extension array method is meant to support.
|
2043 |
+
|
2044 |
+
Examples
|
2045 |
+
--------
|
2046 |
+
>>> import pyarrow as pa
|
2047 |
+
>>> a = pd.array([[1, 2, 3], [4], [5, 6]],
|
2048 |
+
... dtype=pd.ArrowDtype(pa.list_(pa.int64())))
|
2049 |
+
>>> a._explode()
|
2050 |
+
(<ArrowExtensionArray>
|
2051 |
+
[1, 2, 3, 4, 5, 6]
|
2052 |
+
Length: 6, dtype: int64[pyarrow], array([3, 1, 2], dtype=int32))
|
2053 |
+
"""
|
2054 |
+
values = self.copy()
|
2055 |
+
counts = np.ones(shape=(len(self),), dtype=np.uint64)
|
2056 |
+
return values, counts
|
2057 |
+
|
2058 |
+
def tolist(self) -> list:
|
2059 |
+
"""
|
2060 |
+
Return a list of the values.
|
2061 |
+
|
2062 |
+
These are each a scalar type, which is a Python scalar
|
2063 |
+
(for str, int, float) or a pandas scalar
|
2064 |
+
(for Timestamp/Timedelta/Interval/Period)
|
2065 |
+
|
2066 |
+
Returns
|
2067 |
+
-------
|
2068 |
+
list
|
2069 |
+
|
2070 |
+
Examples
|
2071 |
+
--------
|
2072 |
+
>>> arr = pd.array([1, 2, 3])
|
2073 |
+
>>> arr.tolist()
|
2074 |
+
[1, 2, 3]
|
2075 |
+
"""
|
2076 |
+
if self.ndim > 1:
|
2077 |
+
return [x.tolist() for x in self]
|
2078 |
+
return list(self)
|
2079 |
+
|
2080 |
+
def delete(self, loc: PositionalIndexer) -> Self:
|
2081 |
+
indexer = np.delete(np.arange(len(self)), loc)
|
2082 |
+
return self.take(indexer)
|
2083 |
+
|
2084 |
+
def insert(self, loc: int, item) -> Self:
|
2085 |
+
"""
|
2086 |
+
Insert an item at the given position.
|
2087 |
+
|
2088 |
+
Parameters
|
2089 |
+
----------
|
2090 |
+
loc : int
|
2091 |
+
item : scalar-like
|
2092 |
+
|
2093 |
+
Returns
|
2094 |
+
-------
|
2095 |
+
same type as self
|
2096 |
+
|
2097 |
+
Notes
|
2098 |
+
-----
|
2099 |
+
This method should be both type and dtype-preserving. If the item
|
2100 |
+
cannot be held in an array of this type/dtype, either ValueError or
|
2101 |
+
TypeError should be raised.
|
2102 |
+
|
2103 |
+
The default implementation relies on _from_sequence to raise on invalid
|
2104 |
+
items.
|
2105 |
+
|
2106 |
+
Examples
|
2107 |
+
--------
|
2108 |
+
>>> arr = pd.array([1, 2, 3])
|
2109 |
+
>>> arr.insert(2, -1)
|
2110 |
+
<IntegerArray>
|
2111 |
+
[1, 2, -1, 3]
|
2112 |
+
Length: 4, dtype: Int64
|
2113 |
+
"""
|
2114 |
+
loc = validate_insert_loc(loc, len(self))
|
2115 |
+
|
2116 |
+
item_arr = type(self)._from_sequence([item], dtype=self.dtype)
|
2117 |
+
|
2118 |
+
return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]])
|
2119 |
+
|
2120 |
+
def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
|
2121 |
+
"""
|
2122 |
+
Analogue to np.putmask(self, mask, value)
|
2123 |
+
|
2124 |
+
Parameters
|
2125 |
+
----------
|
2126 |
+
mask : np.ndarray[bool]
|
2127 |
+
value : scalar or listlike
|
2128 |
+
If listlike, must be arraylike with same length as self.
|
2129 |
+
|
2130 |
+
Returns
|
2131 |
+
-------
|
2132 |
+
None
|
2133 |
+
|
2134 |
+
Notes
|
2135 |
+
-----
|
2136 |
+
Unlike np.putmask, we do not repeat listlike values with mismatched length.
|
2137 |
+
'value' should either be a scalar or an arraylike with the same length
|
2138 |
+
as self.
|
2139 |
+
"""
|
2140 |
+
if is_list_like(value):
|
2141 |
+
val = value[mask]
|
2142 |
+
else:
|
2143 |
+
val = value
|
2144 |
+
|
2145 |
+
self[mask] = val
|
2146 |
+
|
2147 |
+
def _where(self, mask: npt.NDArray[np.bool_], value) -> Self:
|
2148 |
+
"""
|
2149 |
+
Analogue to np.where(mask, self, value)
|
2150 |
+
|
2151 |
+
Parameters
|
2152 |
+
----------
|
2153 |
+
mask : np.ndarray[bool]
|
2154 |
+
value : scalar or listlike
|
2155 |
+
|
2156 |
+
Returns
|
2157 |
+
-------
|
2158 |
+
same type as self
|
2159 |
+
"""
|
2160 |
+
result = self.copy()
|
2161 |
+
|
2162 |
+
if is_list_like(value):
|
2163 |
+
val = value[~mask]
|
2164 |
+
else:
|
2165 |
+
val = value
|
2166 |
+
|
2167 |
+
result[~mask] = val
|
2168 |
+
return result
|
2169 |
+
|
2170 |
+
# TODO(3.0): this can be removed once GH#33302 deprecation is enforced
|
2171 |
+
def _fill_mask_inplace(
|
2172 |
+
self, method: str, limit: int | None, mask: npt.NDArray[np.bool_]
|
2173 |
+
) -> None:
|
2174 |
+
"""
|
2175 |
+
Replace values in locations specified by 'mask' using pad or backfill.
|
2176 |
+
|
2177 |
+
See also
|
2178 |
+
--------
|
2179 |
+
ExtensionArray.fillna
|
2180 |
+
"""
|
2181 |
+
func = missing.get_fill_func(method)
|
2182 |
+
npvalues = self.astype(object)
|
2183 |
+
# NB: if we don't copy mask here, it may be altered inplace, which
|
2184 |
+
# would mess up the `self[mask] = ...` below.
|
2185 |
+
func(npvalues, limit=limit, mask=mask.copy())
|
2186 |
+
new_values = self._from_sequence(npvalues, dtype=self.dtype)
|
2187 |
+
self[mask] = new_values[mask]
|
2188 |
+
|
2189 |
+
def _rank(
|
2190 |
+
self,
|
2191 |
+
*,
|
2192 |
+
axis: AxisInt = 0,
|
2193 |
+
method: str = "average",
|
2194 |
+
na_option: str = "keep",
|
2195 |
+
ascending: bool = True,
|
2196 |
+
pct: bool = False,
|
2197 |
+
):
|
2198 |
+
"""
|
2199 |
+
See Series.rank.__doc__.
|
2200 |
+
"""
|
2201 |
+
if axis != 0:
|
2202 |
+
raise NotImplementedError
|
2203 |
+
|
2204 |
+
return rank(
|
2205 |
+
self._values_for_argsort(),
|
2206 |
+
axis=axis,
|
2207 |
+
method=method,
|
2208 |
+
na_option=na_option,
|
2209 |
+
ascending=ascending,
|
2210 |
+
pct=pct,
|
2211 |
+
)
|
2212 |
+
|
2213 |
+
@classmethod
|
2214 |
+
def _empty(cls, shape: Shape, dtype: ExtensionDtype):
|
2215 |
+
"""
|
2216 |
+
Create an ExtensionArray with the given shape and dtype.
|
2217 |
+
|
2218 |
+
See also
|
2219 |
+
--------
|
2220 |
+
ExtensionDtype.empty
|
2221 |
+
ExtensionDtype.empty is the 'official' public version of this API.
|
2222 |
+
"""
|
2223 |
+
# Implementer note: while ExtensionDtype.empty is the public way to
|
2224 |
+
# call this method, it is still required to implement this `_empty`
|
2225 |
+
# method as well (it is called internally in pandas)
|
2226 |
+
obj = cls._from_sequence([], dtype=dtype)
|
2227 |
+
|
2228 |
+
taker = np.broadcast_to(np.intp(-1), shape)
|
2229 |
+
result = obj.take(taker, allow_fill=True)
|
2230 |
+
if not isinstance(result, cls) or dtype != result.dtype:
|
2231 |
+
raise NotImplementedError(
|
2232 |
+
f"Default 'empty' implementation is invalid for dtype='{dtype}'"
|
2233 |
+
)
|
2234 |
+
return result
|
2235 |
+
|
2236 |
+
def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self:
|
2237 |
+
"""
|
2238 |
+
Compute the quantiles of self for each quantile in `qs`.
|
2239 |
+
|
2240 |
+
Parameters
|
2241 |
+
----------
|
2242 |
+
qs : np.ndarray[float64]
|
2243 |
+
interpolation: str
|
2244 |
+
|
2245 |
+
Returns
|
2246 |
+
-------
|
2247 |
+
same type as self
|
2248 |
+
"""
|
2249 |
+
mask = np.asarray(self.isna())
|
2250 |
+
arr = np.asarray(self)
|
2251 |
+
fill_value = np.nan
|
2252 |
+
|
2253 |
+
res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)
|
2254 |
+
return type(self)._from_sequence(res_values)
|
2255 |
+
|
2256 |
+
def _mode(self, dropna: bool = True) -> Self:
|
2257 |
+
"""
|
2258 |
+
Returns the mode(s) of the ExtensionArray.
|
2259 |
+
|
2260 |
+
Always returns `ExtensionArray` even if only one value.
|
2261 |
+
|
2262 |
+
Parameters
|
2263 |
+
----------
|
2264 |
+
dropna : bool, default True
|
2265 |
+
Don't consider counts of NA values.
|
2266 |
+
|
2267 |
+
Returns
|
2268 |
+
-------
|
2269 |
+
same type as self
|
2270 |
+
Sorted, if possible.
|
2271 |
+
"""
|
2272 |
+
# error: Incompatible return value type (got "Union[ExtensionArray,
|
2273 |
+
# ndarray[Any, Any]]", expected "Self")
|
2274 |
+
return mode(self, dropna=dropna) # type: ignore[return-value]
|
2275 |
+
|
2276 |
+
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
|
2277 |
+
if any(
|
2278 |
+
isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)) for other in inputs
|
2279 |
+
):
|
2280 |
+
return NotImplemented
|
2281 |
+
|
2282 |
+
result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
|
2283 |
+
self, ufunc, method, *inputs, **kwargs
|
2284 |
+
)
|
2285 |
+
if result is not NotImplemented:
|
2286 |
+
return result
|
2287 |
+
|
2288 |
+
if "out" in kwargs:
|
2289 |
+
return arraylike.dispatch_ufunc_with_out(
|
2290 |
+
self, ufunc, method, *inputs, **kwargs
|
2291 |
+
)
|
2292 |
+
|
2293 |
+
if method == "reduce":
|
2294 |
+
result = arraylike.dispatch_reduction_ufunc(
|
2295 |
+
self, ufunc, method, *inputs, **kwargs
|
2296 |
+
)
|
2297 |
+
if result is not NotImplemented:
|
2298 |
+
return result
|
2299 |
+
|
2300 |
+
return arraylike.default_array_ufunc(self, ufunc, method, *inputs, **kwargs)
|
2301 |
+
|
2302 |
+
def map(self, mapper, na_action=None):
|
2303 |
+
"""
|
2304 |
+
Map values using an input mapping or function.
|
2305 |
+
|
2306 |
+
Parameters
|
2307 |
+
----------
|
2308 |
+
mapper : function, dict, or Series
|
2309 |
+
Mapping correspondence.
|
2310 |
+
na_action : {None, 'ignore'}, default None
|
2311 |
+
If 'ignore', propagate NA values, without passing them to the
|
2312 |
+
mapping correspondence. If 'ignore' is not supported, a
|
2313 |
+
``NotImplementedError`` should be raised.
|
2314 |
+
|
2315 |
+
Returns
|
2316 |
+
-------
|
2317 |
+
Union[ndarray, Index, ExtensionArray]
|
2318 |
+
The output of the mapping function applied to the array.
|
2319 |
+
If the function returns a tuple with more than one element
|
2320 |
+
a MultiIndex will be returned.
|
2321 |
+
"""
|
2322 |
+
return map_array(self, mapper, na_action=na_action)
|
2323 |
+
|
2324 |
+
# ------------------------------------------------------------------------
|
2325 |
+
# GroupBy Methods
|
2326 |
+
|
2327 |
+
def _groupby_op(
|
2328 |
+
self,
|
2329 |
+
*,
|
2330 |
+
how: str,
|
2331 |
+
has_dropped_na: bool,
|
2332 |
+
min_count: int,
|
2333 |
+
ngroups: int,
|
2334 |
+
ids: npt.NDArray[np.intp],
|
2335 |
+
**kwargs,
|
2336 |
+
) -> ArrayLike:
|
2337 |
+
"""
|
2338 |
+
Dispatch GroupBy reduction or transformation operation.
|
2339 |
+
|
2340 |
+
This is an *experimental* API to allow ExtensionArray authors to implement
|
2341 |
+
reductions and transformations. The API is subject to change.
|
2342 |
+
|
2343 |
+
Parameters
|
2344 |
+
----------
|
2345 |
+
how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median',
|
2346 |
+
'median', 'var', 'std', 'sem', 'nth', 'last', 'ohlc',
|
2347 |
+
'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'}
|
2348 |
+
has_dropped_na : bool
|
2349 |
+
min_count : int
|
2350 |
+
ngroups : int
|
2351 |
+
ids : np.ndarray[np.intp]
|
2352 |
+
ids[i] gives the integer label for the group that self[i] belongs to.
|
2353 |
+
**kwargs : operation-specific
|
2354 |
+
'any', 'all' -> ['skipna']
|
2355 |
+
'var', 'std', 'sem' -> ['ddof']
|
2356 |
+
'cumprod', 'cumsum', 'cummin', 'cummax' -> ['skipna']
|
2357 |
+
'rank' -> ['ties_method', 'ascending', 'na_option', 'pct']
|
2358 |
+
|
2359 |
+
Returns
|
2360 |
+
-------
|
2361 |
+
np.ndarray or ExtensionArray
|
2362 |
+
"""
|
2363 |
+
from pandas.core.arrays.string_ import StringDtype
|
2364 |
+
from pandas.core.groupby.ops import WrappedCythonOp
|
2365 |
+
|
2366 |
+
kind = WrappedCythonOp.get_kind_from_how(how)
|
2367 |
+
op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
|
2368 |
+
|
2369 |
+
# GH#43682
|
2370 |
+
if isinstance(self.dtype, StringDtype):
|
2371 |
+
# StringArray
|
2372 |
+
if op.how not in ["any", "all"]:
|
2373 |
+
# Fail early to avoid conversion to object
|
2374 |
+
op._get_cython_function(op.kind, op.how, np.dtype(object), False)
|
2375 |
+
npvalues = self.to_numpy(object, na_value=np.nan)
|
2376 |
+
else:
|
2377 |
+
raise NotImplementedError(
|
2378 |
+
f"function is not implemented for this dtype: {self.dtype}"
|
2379 |
+
)
|
2380 |
+
|
2381 |
+
res_values = op._cython_op_ndim_compat(
|
2382 |
+
npvalues,
|
2383 |
+
min_count=min_count,
|
2384 |
+
ngroups=ngroups,
|
2385 |
+
comp_ids=ids,
|
2386 |
+
mask=None,
|
2387 |
+
**kwargs,
|
2388 |
+
)
|
2389 |
+
|
2390 |
+
if op.how in op.cast_blocklist:
|
2391 |
+
# i.e. how in ["rank"], since other cast_blocklist methods don't go
|
2392 |
+
# through cython_operation
|
2393 |
+
return res_values
|
2394 |
+
|
2395 |
+
if isinstance(self.dtype, StringDtype):
|
2396 |
+
dtype = self.dtype
|
2397 |
+
string_array_cls = dtype.construct_array_type()
|
2398 |
+
return string_array_cls._from_sequence(res_values, dtype=dtype)
|
2399 |
+
|
2400 |
+
else:
|
2401 |
+
raise NotImplementedError
|
2402 |
+
|
2403 |
+
|
2404 |
+
class ExtensionArraySupportsAnyAll(ExtensionArray):
|
2405 |
+
def any(self, *, skipna: bool = True) -> bool:
|
2406 |
+
raise AbstractMethodError(self)
|
2407 |
+
|
2408 |
+
def all(self, *, skipna: bool = True) -> bool:
|
2409 |
+
raise AbstractMethodError(self)
|
2410 |
+
|
2411 |
+
|
2412 |
+
class ExtensionOpsMixin:
|
2413 |
+
"""
|
2414 |
+
A base class for linking the operators to their dunder names.
|
2415 |
+
|
2416 |
+
.. note::
|
2417 |
+
|
2418 |
+
You may want to set ``__array_priority__`` if you want your
|
2419 |
+
implementation to be called when involved in binary operations
|
2420 |
+
with NumPy arrays.
|
2421 |
+
"""
|
2422 |
+
|
2423 |
+
@classmethod
|
2424 |
+
def _create_arithmetic_method(cls, op):
|
2425 |
+
raise AbstractMethodError(cls)
|
2426 |
+
|
2427 |
+
@classmethod
|
2428 |
+
def _add_arithmetic_ops(cls) -> None:
|
2429 |
+
setattr(cls, "__add__", cls._create_arithmetic_method(operator.add))
|
2430 |
+
setattr(cls, "__radd__", cls._create_arithmetic_method(roperator.radd))
|
2431 |
+
setattr(cls, "__sub__", cls._create_arithmetic_method(operator.sub))
|
2432 |
+
setattr(cls, "__rsub__", cls._create_arithmetic_method(roperator.rsub))
|
2433 |
+
setattr(cls, "__mul__", cls._create_arithmetic_method(operator.mul))
|
2434 |
+
setattr(cls, "__rmul__", cls._create_arithmetic_method(roperator.rmul))
|
2435 |
+
setattr(cls, "__pow__", cls._create_arithmetic_method(operator.pow))
|
2436 |
+
setattr(cls, "__rpow__", cls._create_arithmetic_method(roperator.rpow))
|
2437 |
+
setattr(cls, "__mod__", cls._create_arithmetic_method(operator.mod))
|
2438 |
+
setattr(cls, "__rmod__", cls._create_arithmetic_method(roperator.rmod))
|
2439 |
+
setattr(cls, "__floordiv__", cls._create_arithmetic_method(operator.floordiv))
|
2440 |
+
setattr(
|
2441 |
+
cls, "__rfloordiv__", cls._create_arithmetic_method(roperator.rfloordiv)
|
2442 |
+
)
|
2443 |
+
setattr(cls, "__truediv__", cls._create_arithmetic_method(operator.truediv))
|
2444 |
+
setattr(cls, "__rtruediv__", cls._create_arithmetic_method(roperator.rtruediv))
|
2445 |
+
setattr(cls, "__divmod__", cls._create_arithmetic_method(divmod))
|
2446 |
+
setattr(cls, "__rdivmod__", cls._create_arithmetic_method(roperator.rdivmod))
|
2447 |
+
|
2448 |
+
@classmethod
|
2449 |
+
def _create_comparison_method(cls, op):
|
2450 |
+
raise AbstractMethodError(cls)
|
2451 |
+
|
2452 |
+
@classmethod
|
2453 |
+
def _add_comparison_ops(cls) -> None:
|
2454 |
+
setattr(cls, "__eq__", cls._create_comparison_method(operator.eq))
|
2455 |
+
setattr(cls, "__ne__", cls._create_comparison_method(operator.ne))
|
2456 |
+
setattr(cls, "__lt__", cls._create_comparison_method(operator.lt))
|
2457 |
+
setattr(cls, "__gt__", cls._create_comparison_method(operator.gt))
|
2458 |
+
setattr(cls, "__le__", cls._create_comparison_method(operator.le))
|
2459 |
+
setattr(cls, "__ge__", cls._create_comparison_method(operator.ge))
|
2460 |
+
|
2461 |
+
@classmethod
|
2462 |
+
def _create_logical_method(cls, op):
|
2463 |
+
raise AbstractMethodError(cls)
|
2464 |
+
|
2465 |
+
@classmethod
|
2466 |
+
def _add_logical_ops(cls) -> None:
|
2467 |
+
setattr(cls, "__and__", cls._create_logical_method(operator.and_))
|
2468 |
+
setattr(cls, "__rand__", cls._create_logical_method(roperator.rand_))
|
2469 |
+
setattr(cls, "__or__", cls._create_logical_method(operator.or_))
|
2470 |
+
setattr(cls, "__ror__", cls._create_logical_method(roperator.ror_))
|
2471 |
+
setattr(cls, "__xor__", cls._create_logical_method(operator.xor))
|
2472 |
+
setattr(cls, "__rxor__", cls._create_logical_method(roperator.rxor))
|
2473 |
+
|
2474 |
+
|
2475 |
+
class ExtensionScalarOpsMixin(ExtensionOpsMixin):
|
2476 |
+
"""
|
2477 |
+
A mixin for defining ops on an ExtensionArray.
|
2478 |
+
|
2479 |
+
It is assumed that the underlying scalar objects have the operators
|
2480 |
+
already defined.
|
2481 |
+
|
2482 |
+
Notes
|
2483 |
+
-----
|
2484 |
+
If you have defined a subclass MyExtensionArray(ExtensionArray), then
|
2485 |
+
use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
|
2486 |
+
get the arithmetic operators. After the definition of MyExtensionArray,
|
2487 |
+
insert the lines
|
2488 |
+
|
2489 |
+
MyExtensionArray._add_arithmetic_ops()
|
2490 |
+
MyExtensionArray._add_comparison_ops()
|
2491 |
+
|
2492 |
+
to link the operators to your class.
|
2493 |
+
|
2494 |
+
.. note::
|
2495 |
+
|
2496 |
+
You may want to set ``__array_priority__`` if you want your
|
2497 |
+
implementation to be called when involved in binary operations
|
2498 |
+
with NumPy arrays.
|
2499 |
+
"""
|
2500 |
+
|
2501 |
+
@classmethod
|
2502 |
+
def _create_method(cls, op, coerce_to_dtype: bool = True, result_dtype=None):
|
2503 |
+
"""
|
2504 |
+
A class method that returns a method that will correspond to an
|
2505 |
+
operator for an ExtensionArray subclass, by dispatching to the
|
2506 |
+
relevant operator defined on the individual elements of the
|
2507 |
+
ExtensionArray.
|
2508 |
+
|
2509 |
+
Parameters
|
2510 |
+
----------
|
2511 |
+
op : function
|
2512 |
+
An operator that takes arguments op(a, b)
|
2513 |
+
coerce_to_dtype : bool, default True
|
2514 |
+
boolean indicating whether to attempt to convert
|
2515 |
+
the result to the underlying ExtensionArray dtype.
|
2516 |
+
If it's not possible to create a new ExtensionArray with the
|
2517 |
+
values, an ndarray is returned instead.
|
2518 |
+
|
2519 |
+
Returns
|
2520 |
+
-------
|
2521 |
+
Callable[[Any, Any], Union[ndarray, ExtensionArray]]
|
2522 |
+
A method that can be bound to a class. When used, the method
|
2523 |
+
receives the two arguments, one of which is the instance of
|
2524 |
+
this class, and should return an ExtensionArray or an ndarray.
|
2525 |
+
|
2526 |
+
Returning an ndarray may be necessary when the result of the
|
2527 |
+
`op` cannot be stored in the ExtensionArray. The dtype of the
|
2528 |
+
ndarray uses NumPy's normal inference rules.
|
2529 |
+
|
2530 |
+
Examples
|
2531 |
+
--------
|
2532 |
+
Given an ExtensionArray subclass called MyExtensionArray, use
|
2533 |
+
|
2534 |
+
__add__ = cls._create_method(operator.add)
|
2535 |
+
|
2536 |
+
in the class definition of MyExtensionArray to create the operator
|
2537 |
+
for addition, that will be based on the operator implementation
|
2538 |
+
of the underlying elements of the ExtensionArray
|
2539 |
+
"""
|
2540 |
+
|
2541 |
+
def _binop(self, other):
|
2542 |
+
def convert_values(param):
|
2543 |
+
if isinstance(param, ExtensionArray) or is_list_like(param):
|
2544 |
+
ovalues = param
|
2545 |
+
else: # Assume its an object
|
2546 |
+
ovalues = [param] * len(self)
|
2547 |
+
return ovalues
|
2548 |
+
|
2549 |
+
if isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)):
|
2550 |
+
# rely on pandas to unbox and dispatch to us
|
2551 |
+
return NotImplemented
|
2552 |
+
|
2553 |
+
lvalues = self
|
2554 |
+
rvalues = convert_values(other)
|
2555 |
+
|
2556 |
+
# If the operator is not defined for the underlying objects,
|
2557 |
+
# a TypeError should be raised
|
2558 |
+
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
|
2559 |
+
|
2560 |
+
def _maybe_convert(arr):
|
2561 |
+
if coerce_to_dtype:
|
2562 |
+
# https://github.com/pandas-dev/pandas/issues/22850
|
2563 |
+
# We catch all regular exceptions here, and fall back
|
2564 |
+
# to an ndarray.
|
2565 |
+
res = maybe_cast_pointwise_result(arr, self.dtype, same_dtype=False)
|
2566 |
+
if not isinstance(res, type(self)):
|
2567 |
+
# exception raised in _from_sequence; ensure we have ndarray
|
2568 |
+
res = np.asarray(arr)
|
2569 |
+
else:
|
2570 |
+
res = np.asarray(arr, dtype=result_dtype)
|
2571 |
+
return res
|
2572 |
+
|
2573 |
+
if op.__name__ in {"divmod", "rdivmod"}:
|
2574 |
+
a, b = zip(*res)
|
2575 |
+
return _maybe_convert(a), _maybe_convert(b)
|
2576 |
+
|
2577 |
+
return _maybe_convert(res)
|
2578 |
+
|
2579 |
+
op_name = f"__{op.__name__}__"
|
2580 |
+
return set_function_name(_binop, op_name, cls)
|
2581 |
+
|
2582 |
+
@classmethod
|
2583 |
+
def _create_arithmetic_method(cls, op):
|
2584 |
+
return cls._create_method(op)
|
2585 |
+
|
2586 |
+
@classmethod
|
2587 |
+
def _create_comparison_method(cls, op):
|
2588 |
+
return cls._create_method(op, coerce_to_dtype=False, result_dtype=bool)
|
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/boolean.py
ADDED
@@ -0,0 +1,407 @@
from __future__ import annotations

import numbers
from typing import (
    TYPE_CHECKING,
    ClassVar,
    cast,
)

import numpy as np

from pandas._libs import (
    lib,
    missing as libmissing,
)

from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.missing import isna

from pandas.core import ops
from pandas.core.array_algos import masked_accumulations
from pandas.core.arrays.masked import (
    BaseMaskedArray,
    BaseMaskedDtype,
)

if TYPE_CHECKING:
    import pyarrow

    from pandas._typing import (
        Dtype,
        DtypeObj,
        Self,
        npt,
        type_t,
    )


@register_extension_dtype
class BooleanDtype(BaseMaskedDtype):
    """
    Extension dtype for boolean data.

    .. warning::

       BooleanDtype is considered experimental. The implementation and
       parts of the API may change without warning.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> pd.BooleanDtype()
    BooleanDtype
    """

    name: ClassVar[str] = "boolean"

    # https://github.com/python/mypy/issues/4125
    # error: Signature of "type" incompatible with supertype "BaseMaskedDtype"
    @property
    def type(self) -> type:  # type: ignore[override]
        return np.bool_

    @property
    def kind(self) -> str:
        return "b"

    @property
    def numpy_dtype(self) -> np.dtype:
        return np.dtype("bool")

    @classmethod
    def construct_array_type(cls) -> type_t[BooleanArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return BooleanArray

    def __repr__(self) -> str:
        return "BooleanDtype"

    @property
    def _is_boolean(self) -> bool:
        return True

    @property
    def _is_numeric(self) -> bool:
        return True

    def __from_arrow__(
        self, array: pyarrow.Array | pyarrow.ChunkedArray
    ) -> BooleanArray:
        """
        Construct BooleanArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        if array.type != pyarrow.bool_() and not pyarrow.types.is_null(array.type):
            raise TypeError(f"Expected array of boolean type, got {array.type} instead")

        if isinstance(array, pyarrow.Array):
            chunks = [array]
            length = len(array)
        else:
            # pyarrow.ChunkedArray
            chunks = array.chunks
            length = array.length()

        if pyarrow.types.is_null(array.type):
            mask = np.ones(length, dtype=bool)
            # No need to init data, since all null
            data = np.empty(length, dtype=bool)
            return BooleanArray(data, mask)

        results = []
        for arr in chunks:
            buflist = arr.buffers()
            data = pyarrow.BooleanArray.from_buffers(
                arr.type, len(arr), [None, buflist[1]], offset=arr.offset
            ).to_numpy(zero_copy_only=False)
            if arr.null_count != 0:
                mask = pyarrow.BooleanArray.from_buffers(
                    arr.type, len(arr), [None, buflist[0]], offset=arr.offset
                ).to_numpy(zero_copy_only=False)
                mask = ~mask
            else:
                mask = np.zeros(len(arr), dtype=bool)

            bool_arr = BooleanArray(data, mask)
            results.append(bool_arr)

        if not results:
            return BooleanArray(
                np.array([], dtype=np.bool_), np.array([], dtype=np.bool_)
            )
        else:
            return BooleanArray._concat_same_type(results)


def coerce_to_array(
    values, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
    """
    Coerce the input values array to numpy arrays with a mask.

    Parameters
    ----------
    values : 1D list-like
    mask : bool 1D array, optional
    copy : bool, default False
        if True, copy the input

    Returns
    -------
    tuple of (values, mask)
    """
    if isinstance(values, BooleanArray):
        if mask is not None:
            raise ValueError("cannot pass mask for BooleanArray input")
        values, mask = values._data, values._mask
        if copy:
            values = values.copy()
            mask = mask.copy()
        return values, mask

    mask_values = None
    if isinstance(values, np.ndarray) and values.dtype == np.bool_:
        if copy:
            values = values.copy()
    elif isinstance(values, np.ndarray) and values.dtype.kind in "iufcb":
        mask_values = isna(values)

        values_bool = np.zeros(len(values), dtype=bool)
        values_bool[~mask_values] = values[~mask_values].astype(bool)

        if not np.all(
            values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
        ):
            raise TypeError("Need to pass bool-like values")

        values = values_bool
    else:
        values_object = np.asarray(values, dtype=object)

        inferred_dtype = lib.infer_dtype(values_object, skipna=True)
        integer_like = ("floating", "integer", "mixed-integer-float")
        if inferred_dtype not in ("boolean", "empty") + integer_like:
            raise TypeError("Need to pass bool-like values")

        # mypy does not narrow the type of mask_values to npt.NDArray[np.bool_]
        # within this branch, it assumes it can also be None
        mask_values = cast("npt.NDArray[np.bool_]", isna(values_object))
        values = np.zeros(len(values), dtype=bool)
        values[~mask_values] = values_object[~mask_values].astype(bool)

        # if the values were integer-like, validate they were actually 0/1's
        if (inferred_dtype in integer_like) and not (
            np.all(
                values[~mask_values].astype(float)
                == values_object[~mask_values].astype(float)
            )
        ):
            raise TypeError("Need to pass bool-like values")

    if mask is None and mask_values is None:
        mask = np.zeros(values.shape, dtype=bool)
    elif mask is None:
        mask = mask_values
    else:
        if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
            if mask_values is not None:
                mask = mask | mask_values
            else:
                if copy:
                    mask = mask.copy()
        else:
            mask = np.array(mask, dtype=bool)
            if mask_values is not None:
                mask = mask | mask_values

    if values.shape != mask.shape:
        raise ValueError("values.shape and mask.shape must match")

    return values, mask


class BooleanArray(BaseMaskedArray):
    """
    Array of boolean (True/False) data with missing values.

    This is a pandas Extension array for boolean data, under the hood
    represented by 2 numpy arrays: a boolean array with the data and
    a boolean array with the mask (True indicating missing).

    BooleanArray implements Kleene logic (sometimes called three-value
    logic) for logical operations. See :ref:`boolean.kleene` for more.

    To construct a BooleanArray from generic array-like input, use
    :func:`pandas.array` specifying ``dtype="boolean"`` (see examples
    below).

    .. warning::

       BooleanArray is considered experimental. The implementation and
       parts of the API may change without warning.

    Parameters
    ----------
    values : numpy.ndarray
        A 1-d boolean-dtype array with the data.
    mask : numpy.ndarray
        A 1-d boolean-dtype array indicating missing values (True
        indicates missing).
    copy : bool, default False
        Whether to copy the `values` and `mask` arrays.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Returns
    -------
    BooleanArray

    Examples
    --------
    Create a BooleanArray with :func:`pandas.array`:

    >>> pd.array([True, False, None], dtype="boolean")
    <BooleanArray>
    [True, False, <NA>]
    Length: 3, dtype: boolean
    """

    # The value used to fill '_data' to avoid upcasting
    _internal_fill_value = False
    # Fill values used for any/all
    # Incompatible types in assignment (expression has type "bool", base class
    # "BaseMaskedArray" defined the type as "<typing special form>")
    _truthy_value = True  # type: ignore[assignment]
    _falsey_value = False  # type: ignore[assignment]
    _TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
    _FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}

    @classmethod
    def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
        result = super()._simple_new(values, mask)
        result._dtype = BooleanDtype()
        return result

    def __init__(
        self, values: np.ndarray, mask: np.ndarray, copy: bool = False
    ) -> None:
        if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
            raise TypeError(
                "values should be boolean numpy array. Use "
                "the 'pd.array' function instead"
            )
        self._dtype = BooleanDtype()
        super().__init__(values, mask, copy=copy)

    @property
    def dtype(self) -> BooleanDtype:
        return self._dtype

    @classmethod
    def _from_sequence_of_strings(
        cls,
        strings: list[str],
        *,
        dtype: Dtype | None = None,
        copy: bool = False,
        true_values: list[str] | None = None,
        false_values: list[str] | None = None,
    ) -> BooleanArray:
        true_values_union = cls._TRUE_VALUES.union(true_values or [])
        false_values_union = cls._FALSE_VALUES.union(false_values or [])

        def map_string(s) -> bool:
            if s in true_values_union:
                return True
            elif s in false_values_union:
                return False
            else:
                raise ValueError(f"{s} cannot be cast to bool")

        scalars = np.array(strings, dtype=object)
        mask = isna(scalars)
        scalars[~mask] = list(map(map_string, scalars[~mask]))
        return cls._from_sequence(scalars, dtype=dtype, copy=copy)

    _HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)

    @classmethod
    def _coerce_to_array(
        cls, value, *, dtype: DtypeObj, copy: bool = False
    ) -> tuple[np.ndarray, np.ndarray]:
        if dtype:
            assert dtype == "boolean"
        return coerce_to_array(value, copy=copy)

    def _logical_method(self, other, op):
        assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
        other_is_scalar = lib.is_scalar(other)
        mask = None

        if isinstance(other, BooleanArray):
            other, mask = other._data, other._mask
        elif is_list_like(other):
            other = np.asarray(other, dtype="bool")
            if other.ndim > 1:
                raise NotImplementedError("can only perform ops with 1-d structures")
            other, mask = coerce_to_array(other, copy=False)
        elif isinstance(other, np.bool_):
            other = other.item()

        if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
            raise TypeError(
                "'other' should be pandas.NA or a bool. "
                f"Got {type(other).__name__} instead."
            )

        if not other_is_scalar and len(self) != len(other):
            raise ValueError("Lengths must match")

        if op.__name__ in {"or_", "ror_"}:
            result, mask = ops.kleene_or(self._data, other, self._mask, mask)
        elif op.__name__ in {"and_", "rand_"}:
            result, mask = ops.kleene_and(self._data, other, self._mask, mask)
        else:
            # i.e. xor, rxor
            result, mask = ops.kleene_xor(self._data, other, self._mask, mask)

        # i.e. BooleanArray
        return self._maybe_mask_result(result, mask)

    def _accumulate(
        self, name: str, *, skipna: bool = True, **kwargs
    ) -> BaseMaskedArray:
        data = self._data
        mask = self._mask
        if name in ("cummin", "cummax"):
            op = getattr(masked_accumulations, name)
            data, mask = op(data, mask, skipna=skipna, **kwargs)
            return self._simple_new(data, mask)
        else:
            from pandas.core.arrays import IntegerArray

            return IntegerArray(data.astype(int), mask)._accumulate(
                name, skipna=skipna, **kwargs
            )
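
As the BooleanArray docstring above recommends, construction goes through pd.array with dtype="boolean" (which routes through coerce_to_array). A brief usage sketch, not part of the source file, showing the Kleene three-valued semantics that _logical_method implements; the expected element values are noted in comments:

# Usage sketch for BooleanArray, assuming pandas 2.x.
import pandas as pd

arr = pd.array([True, False, None], dtype="boolean")

print(arr | True)   # [True, True, True]   -- NA | True is True under Kleene logic
print(arr & True)   # [True, False, <NA>]  -- NA & True stays NA
print(arr ^ True)   # [False, True, <NA>]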
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/categorical.py
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/datetimelike.py
ADDED
@@ -0,0 +1,2556 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from __future__ import annotations

from datetime import (
    datetime,
    timedelta,
)
from functools import wraps
import operator
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    Union,
    cast,
    final,
    overload,
)
import warnings

import numpy as np

from pandas._libs import (
    algos,
    lib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
    BaseOffset,
    IncompatibleFrequency,
    NaT,
    NaTType,
    Period,
    Resolution,
    Tick,
    Timedelta,
    Timestamp,
    add_overflowsafe,
    astype_overflowsafe,
    get_unit_from_dtype,
    iNaT,
    ints_to_pydatetime,
    ints_to_pytimedelta,
    periods_per_day,
    to_offset,
)
from pandas._libs.tslibs.fields import (
    RoundTo,
    round_nsint64,
)
from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
from pandas._libs.tslibs.timedeltas import get_unit_for_round
from pandas._libs.tslibs.timestamps import integer_op_not_supported
from pandas._typing import (
    ArrayLike,
    AxisInt,
    DatetimeLikeScalar,
    Dtype,
    DtypeObj,
    F,
    InterpolateOptions,
    NpDtype,
    PositionalIndexer2D,
    PositionalIndexerTuple,
    ScalarIndexer,
    Self,
    SequenceIndexer,
    TimeAmbiguous,
    TimeNonexistent,
    npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
    AbstractMethodError,
    InvalidComparison,
    PerformanceWarning,
)
from pandas.util._decorators import (
    Appender,
    Substitution,
    cache_readonly,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
    is_all_strings,
    is_integer_dtype,
    is_list_like,
    is_object_dtype,
    is_string_dtype,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
    ArrowDtype,
    CategoricalDtype,
    DatetimeTZDtype,
    ExtensionDtype,
    PeriodDtype,
)
from pandas.core.dtypes.generic import (
    ABCCategorical,
    ABCMultiIndex,
)
from pandas.core.dtypes.missing import (
    is_valid_na_for_dtype,
    isna,
)

from pandas.core import (
    algorithms,
    missing,
    nanops,
    ops,
)
from pandas.core.algorithms import (
    isin,
    map_array,
    unique1d,
)
from pandas.core.array_algos import datetimelike_accumulations
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._mixins import (
    NDArrayBackedExtensionArray,
    ravel_compat,
)
from pandas.core.arrays.arrow.array import ArrowExtensionArray
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import (
    array as pd_array,
    ensure_wrapped_if_datetimelike,
    extract_array,
)
from pandas.core.indexers import (
    check_array_indexer,
    check_setitem_lengths,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import (
    invalid_comparison,
    make_invalid_op,
)

from pandas.tseries import frequencies

if TYPE_CHECKING:
    from collections.abc import (
        Iterator,
        Sequence,
    )

    from pandas import Index
    from pandas.core.arrays import (
        DatetimeArray,
        PeriodArray,
        TimedeltaArray,
    )

DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType]


def _make_unpacked_invalid_op(op_name: str):
    op = make_invalid_op(op_name)
    return unpack_zerodim_and_defer(op_name)(op)


def _period_dispatch(meth: F) -> F:
    """
    For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results
    in PeriodArray. We cannot use ._ndarray directly for the affected
    methods because the i8 data has different semantics on NaT values.
    """

    @wraps(meth)
    def new_meth(self, *args, **kwargs):
        if not isinstance(self.dtype, PeriodDtype):
            return meth(self, *args, **kwargs)

        arr = self.view("M8[ns]")
        result = meth(arr, *args, **kwargs)
        if result is NaT:
            return NaT
        elif isinstance(result, Timestamp):
            return self._box_func(result._value)

        res_i8 = result.view("i8")
        return self._from_backing_data(res_i8)

    return cast(F, new_meth)

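# Added illustration (not part of the original file; assumes ``import
# pandas as pd``): the dispatch pattern above lets PeriodArray borrow
# DatetimeArray implementations by viewing its ordinal i8 data as M8[ns]
# and re-wrapping the result.
#
# >>> parr = pd.period_range("2020-01", periods=3, freq="M").array
# >>> parr.min()  # reductions route through the datetime64 view
# Period('2020-01', 'M')
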
# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
# incompatible with definition in base class "ExtensionArray"
class DatetimeLikeArrayMixin(  # type: ignore[misc]
    OpsMixin, NDArrayBackedExtensionArray
):
    """
    Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray

    Assumes that __new__/__init__ defines:
        _ndarray

    and that inheriting subclass implements:
        freq
    """

    # _infer_matches -> which infer_dtype strings are close enough to our own
    _infer_matches: tuple[str, ...]
    _is_recognized_dtype: Callable[[DtypeObj], bool]
    _recognized_scalars: tuple[type, ...]
    _ndarray: np.ndarray
    freq: BaseOffset | None

    @cache_readonly
    def _can_hold_na(self) -> bool:
        return True

    def __init__(
        self, data, dtype: Dtype | None = None, freq=None, copy: bool = False
    ) -> None:
        raise AbstractMethodError(self)

    @property
    def _scalar_type(self) -> type[DatetimeLikeScalar]:
        """
        The scalar associated with this datelike

        * PeriodArray : Period
        * DatetimeArray : Timestamp
        * TimedeltaArray : Timedelta
        """
        raise AbstractMethodError(self)

    def _scalar_from_string(self, value: str) -> DTScalarOrNaT:
        """
        Construct a scalar type from a string.

        Parameters
        ----------
        value : str

        Returns
        -------
        Period, Timestamp, or Timedelta, or NaT
            Whatever the type of ``self._scalar_type`` is.

        Notes
        -----
        This should call ``self._check_compatible_with`` before
        unboxing the result.
        """
        raise AbstractMethodError(self)

    def _unbox_scalar(
        self, value: DTScalarOrNaT
    ) -> np.int64 | np.datetime64 | np.timedelta64:
        """
        Unbox the integer value of a scalar `value`.

        Parameters
        ----------
        value : Period, Timestamp, Timedelta, or NaT
            Depending on subclass.

        Returns
        -------
        int

        Examples
        --------
        >>> arr = pd.array(np.array(['1970-01-01'], 'datetime64[ns]'))
        >>> arr._unbox_scalar(arr[0])
        numpy.datetime64('1970-01-01T00:00:00.000000000')
        """
        raise AbstractMethodError(self)

    def _check_compatible_with(self, other: DTScalarOrNaT) -> None:
        """
        Verify that `self` and `other` are compatible.

        * DatetimeArray verifies that the timezones (if any) match
        * PeriodArray verifies that the freq matches
        * Timedelta has no verification

        In each case, NaT is considered compatible.

        Parameters
        ----------
        other

        Raises
        ------
        Exception
        """
        raise AbstractMethodError(self)

    # ------------------------------------------------------------------

    def _box_func(self, x):
        """
        box function to get object from internal representation
        """
        raise AbstractMethodError(self)

    def _box_values(self, values) -> np.ndarray:
        """
        apply box func to passed values
        """
        return lib.map_infer(values, self._box_func, convert=False)

    def __iter__(self) -> Iterator:
        if self.ndim > 1:
            return (self[n] for n in range(len(self)))
        else:
            return (self._box_func(v) for v in self.asi8)

    @property
    def asi8(self) -> npt.NDArray[np.int64]:
        """
        Integer representation of the values.

        Returns
        -------
        ndarray
            An ndarray with int64 dtype.
        """
        # do not cache or you'll create a memory leak
        return self._ndarray.view("i8")

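    # Added illustration (not in the original file; assumes ``import pandas
    # as pd``): asi8 exposes the raw epoch-offset integers, with NaT stored
    # as iNaT. The exact numpy repr can vary by platform.
    #
    # >>> dti = pd.to_datetime(["1970-01-01", "1970-01-02"])
    # >>> dti.asi8
    # array([            0, 86400000000000])
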
    # ----------------------------------------------------------------
    # Rendering Methods

    def _format_native_types(
        self, *, na_rep: str | float = "NaT", date_format=None
    ) -> npt.NDArray[np.object_]:
        """
        Helper method for astype when converting to strings.

        Returns
        -------
        ndarray[str]
        """
        raise AbstractMethodError(self)

    def _formatter(self, boxed: bool = False):
        # TODO: Remove Datetime & DatetimeTZ formatters.
        return "'{}'".format

    # ----------------------------------------------------------------
    # Array-Like / EA-Interface Methods

    def __array__(
        self, dtype: NpDtype | None = None, copy: bool | None = None
    ) -> np.ndarray:
        # used for Timedelta/DatetimeArray, overwritten by PeriodArray
        if is_object_dtype(dtype):
            return np.array(list(self), dtype=object)
        return self._ndarray

    @overload
    def __getitem__(self, item: ScalarIndexer) -> DTScalarOrNaT:
        ...

    @overload
    def __getitem__(
        self,
        item: SequenceIndexer | PositionalIndexerTuple,
    ) -> Self:
        ...

    def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT:
        """
        This getitem defers to the underlying array, which by-definition can
        only handle list-likes, slices, and integer scalars
        """
        # Use cast as we know we will get back a DatetimeLikeArray or DTScalar,
        # but skip evaluating the Union at runtime for performance
        # (see https://github.com/pandas-dev/pandas/pull/44624)
        result = cast("Union[Self, DTScalarOrNaT]", super().__getitem__(key))
        if lib.is_scalar(result):
            return result
        else:
            # At this point we know the result is an array.
            result = cast(Self, result)
        result._freq = self._get_getitem_freq(key)
        return result

    def _get_getitem_freq(self, key) -> BaseOffset | None:
        """
        Find the `freq` attribute to assign to the result of a __getitem__ lookup.
        """
        is_period = isinstance(self.dtype, PeriodDtype)
        if is_period:
            freq = self.freq
        elif self.ndim != 1:
            freq = None
        else:
            key = check_array_indexer(self, key)  # maybe ndarray[bool] -> slice
            freq = None
            if isinstance(key, slice):
                if self.freq is not None and key.step is not None:
                    freq = key.step * self.freq
                else:
                    freq = self.freq
            elif key is Ellipsis:
                # GH#21282 indexing with Ellipsis is similar to a full slice,
                # should preserve `freq` attribute
                freq = self.freq
            elif com.is_bool_indexer(key):
                new_key = lib.maybe_booleans_to_slice(key.view(np.uint8))
                if isinstance(new_key, slice):
                    return self._get_getitem_freq(new_key)
        return freq

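    # Added worked example of the slice branch above (not in the original
    # file; assumes ``import pandas as pd``): slicing with a step rescales
    # the freq as ``key.step * self.freq``.
    #
    # >>> dti = pd.date_range("2020-01-01", periods=6, freq="D")
    # >>> dti[::2].freq
    # <2 * Days>
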
    # error: Argument 1 of "__setitem__" is incompatible with supertype
    # "ExtensionArray"; supertype defines the argument type as "Union[int,
    # ndarray]"
    def __setitem__(
        self,
        key: int | Sequence[int] | Sequence[bool] | slice,
        value: NaTType | Any | Sequence[Any],
    ) -> None:
        # I'm fudging the types a bit here. "Any" above really depends
        # on type(self). For PeriodArray, it's Period (or stuff coercible
        # to a period in from_sequence). For DatetimeArray, it's Timestamp...
        # I don't know if mypy can do that, possibly with Generics.
        # https://mypy.readthedocs.io/en/latest/generics.html

        no_op = check_setitem_lengths(key, value, self)

        # Calling super() before the no_op short-circuit means that we raise
        # on invalid 'value' even if this is a no-op, e.g. wrong-dtype empty array.
        super().__setitem__(key, value)

        if no_op:
            return

        self._maybe_clear_freq()

    def _maybe_clear_freq(self) -> None:
        # inplace operations like __setitem__ may invalidate the freq of
        # DatetimeArray and TimedeltaArray
        pass

    def astype(self, dtype, copy: bool = True):
        # Some notes on cases we don't have to handle here in the base class:
        #   1. PeriodArray.astype handles period -> period
        #   2. DatetimeArray.astype handles conversion between tz.
        #   3. DatetimeArray.astype handles datetime -> period
        dtype = pandas_dtype(dtype)

        if dtype == object:
            if self.dtype.kind == "M":
                self = cast("DatetimeArray", self)
                # *much* faster than self._box_values
                # for e.g. test_get_loc_tuple_monotonic_above_size_cutoff
                i8data = self.asi8
                converted = ints_to_pydatetime(
                    i8data,
                    tz=self.tz,
                    box="timestamp",
                    reso=self._creso,
                )
                return converted

            elif self.dtype.kind == "m":
                return ints_to_pytimedelta(self._ndarray, box=True)

            return self._box_values(self.asi8.ravel()).reshape(self.shape)

        elif isinstance(dtype, ExtensionDtype):
            return super().astype(dtype, copy=copy)
        elif is_string_dtype(dtype):
            return self._format_native_types()
        elif dtype.kind in "iu":
            # we deliberately ignore int32 vs. int64 here.
            # See https://github.com/pandas-dev/pandas/issues/24381 for more.
            values = self.asi8
            if dtype != np.int64:
                raise TypeError(
                    f"Converting from {self.dtype} to {dtype} is not supported. "
                    "Do obj.astype('int64').astype(dtype) instead"
                )

            if copy:
                values = values.copy()
            return values
        elif (dtype.kind in "mM" and self.dtype != dtype) or dtype.kind == "f":
            # disallow conversion between datetime/timedelta,
            # and conversions for any datetimelike to float
            msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
            raise TypeError(msg)
        else:
            return np.asarray(self, dtype=dtype)

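    # Added example of the integer-cast rule above (not in the original
    # file): only an exact int64 cast is allowed; anything narrower raises
    # the TypeError constructed above.
    #
    # >>> dti = pd.to_datetime(["1970-01-02"])
    # >>> dti.astype("int64")
    # Index([86400000000000], dtype='int64')
    # >>> dti.astype("int32")
    # Traceback (most recent call last):
    #   ...
    # TypeError: Converting from datetime64[ns] to int32 is not supported. ...
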
    @overload
    def view(self) -> Self:
        ...

    @overload
    def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray:
        ...

    @overload
    def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray:
        ...

    @overload
    def view(self, dtype: Dtype | None = ...) -> ArrayLike:
        ...

    # pylint: disable-next=useless-parent-delegation
    def view(self, dtype: Dtype | None = None) -> ArrayLike:
        # we need to explicitly call super() method as long as the `@overload`s
        # are present in this file.
        return super().view(dtype)

    # ------------------------------------------------------------------
    # Validation Methods
    # TODO: try to de-duplicate these, ensure identical behavior

    def _validate_comparison_value(self, other):
        if isinstance(other, str):
            try:
                # GH#18435 strings get a pass from tzawareness compat
                other = self._scalar_from_string(other)
            except (ValueError, IncompatibleFrequency):
                # failed to parse as Timestamp/Timedelta/Period
                raise InvalidComparison(other)

        if isinstance(other, self._recognized_scalars) or other is NaT:
            other = self._scalar_type(other)
            try:
                self._check_compatible_with(other)
            except (TypeError, IncompatibleFrequency) as err:
                # e.g. tzawareness mismatch
                raise InvalidComparison(other) from err

        elif not is_list_like(other):
            raise InvalidComparison(other)

        elif len(other) != len(self):
            raise ValueError("Lengths must match")

        else:
            try:
                other = self._validate_listlike(other, allow_object=True)
                self._check_compatible_with(other)
            except (TypeError, IncompatibleFrequency) as err:
                if is_object_dtype(getattr(other, "dtype", None)):
                    # We will have to operate element-wise
                    pass
                else:
                    raise InvalidComparison(other) from err

        return other

    def _validate_scalar(
        self,
        value,
        *,
        allow_listlike: bool = False,
        unbox: bool = True,
    ):
        """
        Validate that the input value can be cast to our scalar_type.

        Parameters
        ----------
        value : object
        allow_listlike: bool, default False
            When raising an exception, whether the message should say
            listlike inputs are allowed.
        unbox : bool, default True
            Whether to unbox the result before returning. Note: unbox=False
            skips the setitem compatibility check.

        Returns
        -------
        self._scalar_type or NaT
        """
        if isinstance(value, self._scalar_type):
            pass

        elif isinstance(value, str):
            # NB: Careful about tzawareness
            try:
                value = self._scalar_from_string(value)
            except ValueError as err:
                msg = self._validation_error_message(value, allow_listlike)
                raise TypeError(msg) from err

        elif is_valid_na_for_dtype(value, self.dtype):
            # GH#18295
            value = NaT

        elif isna(value):
            # if we are dt64tz and value is dt64("NaT"), don't cast to NaT,
            # or else we'll fail to raise in _unbox_scalar
            msg = self._validation_error_message(value, allow_listlike)
            raise TypeError(msg)

        elif isinstance(value, self._recognized_scalars):
            # error: Argument 1 to "Timestamp" has incompatible type "object"; expected
            # "integer[Any] | float | str | date | datetime | datetime64"
            value = self._scalar_type(value)  # type: ignore[arg-type]

        else:
            msg = self._validation_error_message(value, allow_listlike)
            raise TypeError(msg)

        if not unbox:
            # NB: In general NDArrayBackedExtensionArray will unbox here;
            # this option exists to prevent a performance hit in
            # TimedeltaIndex.get_loc
            return value
        return self._unbox_scalar(value)

    def _validation_error_message(self, value, allow_listlike: bool = False) -> str:
        """
        Construct an exception message on validation error.

        Some methods allow only scalar inputs, while others allow either scalar
        or listlike.

        Parameters
        ----------
        allow_listlike: bool, default False

        Returns
        -------
        str
        """
        if hasattr(value, "dtype") and getattr(value, "ndim", 0) > 0:
            msg_got = f"{value.dtype} array"
        else:
            msg_got = f"'{type(value).__name__}'"
        if allow_listlike:
            msg = (
                f"value should be a '{self._scalar_type.__name__}', 'NaT', "
                f"or array of those. Got {msg_got} instead."
            )
        else:
            msg = (
                f"value should be a '{self._scalar_type.__name__}' or 'NaT'. "
                f"Got {msg_got} instead."
            )
        return msg

    def _validate_listlike(self, value, allow_object: bool = False):
        if isinstance(value, type(self)):
            if self.dtype.kind in "mM" and not allow_object:
                # error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
                value = value.as_unit(self.unit, round_ok=False)  # type: ignore[attr-defined]
            return value

        if isinstance(value, list) and len(value) == 0:
            # We treat empty list as our own dtype.
            return type(self)._from_sequence([], dtype=self.dtype)

        if hasattr(value, "dtype") and value.dtype == object:
            # `array` below won't do inference if value is an Index or Series.
            # so do so here. In the Index case, inferred_type may be cached.
            if lib.infer_dtype(value) in self._infer_matches:
                try:
                    value = type(self)._from_sequence(value)
                except (ValueError, TypeError):
                    if allow_object:
                        return value
                    msg = self._validation_error_message(value, True)
                    raise TypeError(msg)

        # Do type inference if necessary up front (after unpacking
        # NumpyExtensionArray)
        # e.g. we passed PeriodIndex.values and got an ndarray of Periods
        value = extract_array(value, extract_numpy=True)
        value = pd_array(value)
        value = extract_array(value, extract_numpy=True)

        if is_all_strings(value):
            # We got a StringArray
            try:
                # TODO: Could use from_sequence_of_strings if implemented
                # Note: passing dtype is necessary for PeriodArray tests
                value = type(self)._from_sequence(value, dtype=self.dtype)
            except ValueError:
                pass

        if isinstance(value.dtype, CategoricalDtype):
            # e.g. we have a Categorical holding self.dtype
            if value.categories.dtype == self.dtype:
                # TODO: do we need equal dtype or just comparable?
                value = value._internal_get_values()
                value = extract_array(value, extract_numpy=True)

        if allow_object and is_object_dtype(value.dtype):
            pass

        elif not type(self)._is_recognized_dtype(value.dtype):
            msg = self._validation_error_message(value, True)
            raise TypeError(msg)

        if self.dtype.kind in "mM" and not allow_object:
            # error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
            value = value.as_unit(self.unit, round_ok=False)  # type: ignore[attr-defined]
        return value

    def _validate_setitem_value(self, value):
        if is_list_like(value):
            value = self._validate_listlike(value)
        else:
            return self._validate_scalar(value, allow_listlike=True)

        return self._unbox(value)

    @final
    def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray:
        """
        Unbox either a scalar with _unbox_scalar or an instance of our own type.
        """
        if lib.is_scalar(other):
            other = self._unbox_scalar(other)
        else:
            # same type as self
            self._check_compatible_with(other)
            other = other._ndarray
        return other

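    # Added sketch of how these validators surface to users (not in the
    # original file): __setitem__ funnels through _validate_setitem_value,
    # which calls _validate_scalar(..., allow_listlike=True) for scalars.
    #
    # >>> arr = pd.array(pd.to_datetime(["2020-01-01", "2020-01-02"]))
    # >>> arr[0] = "2020-06-01"  # parsed via _scalar_from_string
    # >>> arr[0] = 1
    # Traceback (most recent call last):
    #   ...
    # TypeError: value should be a 'Timestamp', 'NaT', or array of those. Got 'int' instead.
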
    # ------------------------------------------------------------------
    # Additional array methods
    # These are not part of the EA API, but we implement them because
    # pandas assumes they're there.

    @ravel_compat
    def map(self, mapper, na_action=None):
        from pandas import Index

        result = map_array(self, mapper, na_action=na_action)
        result = Index(result)

        if isinstance(result, ABCMultiIndex):
            return result.to_numpy()
        else:
            return result.array

    def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
        """
        Compute boolean array of whether each value is found in the
        passed set of values.

        Parameters
        ----------
        values : np.ndarray or ExtensionArray

        Returns
        -------
        ndarray[bool]
        """
        if values.dtype.kind in "fiuc":
            # TODO: de-duplicate with equals, validate_comparison_value
            return np.zeros(self.shape, dtype=bool)

        values = ensure_wrapped_if_datetimelike(values)

        if not isinstance(values, type(self)):
            inferable = [
                "timedelta",
                "timedelta64",
                "datetime",
                "datetime64",
                "date",
                "period",
            ]
            if values.dtype == object:
                values = lib.maybe_convert_objects(
                    values,  # type: ignore[arg-type]
                    convert_non_numeric=True,
                    dtype_if_all_nat=self.dtype,
                )
                if values.dtype != object:
                    return self.isin(values)

                inferred = lib.infer_dtype(values, skipna=False)
                if inferred not in inferable:
                    if inferred == "string":
                        pass

                    elif "mixed" in inferred:
                        return isin(self.astype(object), values)
                    else:
                        return np.zeros(self.shape, dtype=bool)

            try:
                values = type(self)._from_sequence(values)
            except ValueError:
                return isin(self.astype(object), values)
            else:
                warnings.warn(
                    # GH#53111
                    f"The behavior of 'isin' with dtype={self.dtype} and "
                    "castable values (e.g. strings) is deprecated. In a "
                    "future version, these will not be considered matching "
                    "by isin. Explicitly cast to the appropriate dtype before "
                    "calling isin instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )

        if self.dtype.kind in "mM":
            self = cast("DatetimeArray | TimedeltaArray", self)
            # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
            # has no attribute "as_unit"
            values = values.as_unit(self.unit)  # type: ignore[union-attr]

        try:
            # error: Argument 1 to "_check_compatible_with" of "DatetimeLikeArrayMixin"
            # has incompatible type "ExtensionArray | ndarray[Any, Any]"; expected
            # "Period | Timestamp | Timedelta | NaTType"
            self._check_compatible_with(values)  # type: ignore[arg-type]
        except (TypeError, ValueError):
            # Includes tzawareness mismatch and IncompatibleFrequencyError
            return np.zeros(self.shape, dtype=bool)

        # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
        # has no attribute "asi8"
        return isin(self.asi8, values.asi8)  # type: ignore[union-attr]

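    # Added illustration of the isin fast paths above (not in the original
    # file):
    #
    # >>> dti = pd.to_datetime(["2020-01-01", "2020-01-02"])
    # >>> dti.isin([pd.Timestamp("2020-01-01")])
    # array([ True, False])
    # >>> dti.isin([1, 2])  # numeric values short-circuit to all-False
    # array([False, False])
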
    # ------------------------------------------------------------------
    # Null Handling

    def isna(self) -> npt.NDArray[np.bool_]:
        return self._isnan

    @property  # NB: override with cache_readonly in immutable subclasses
    def _isnan(self) -> npt.NDArray[np.bool_]:
        """
        return if each value is nan
        """
        return self.asi8 == iNaT

    @property  # NB: override with cache_readonly in immutable subclasses
    def _hasna(self) -> bool:
        """
        return if I have any nans; enables various perf speedups
        """
        return bool(self._isnan.any())

    def _maybe_mask_results(
        self, result: np.ndarray, fill_value=iNaT, convert=None
    ) -> np.ndarray:
        """
        Parameters
        ----------
        result : np.ndarray
        fill_value : object, default iNaT
        convert : str, dtype or None

        Returns
        -------
        result : ndarray with values replaced by the fill_value

        mask the result if needed, convert to the provided dtype if it's not
        None

        This is an internal routine.
        """
        if self._hasna:
            if convert:
                result = result.astype(convert)
            if fill_value is None:
                fill_value = np.nan
            np.putmask(result, self._isnan, fill_value)
        return result

    # ------------------------------------------------------------------
    # Frequency Properties/Methods

    @property
    def freqstr(self) -> str | None:
        """
        Return the frequency object as a string if it's set, otherwise None.

        Examples
        --------
        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D")
        >>> idx.freqstr
        'D'

        The frequency can be inferred if there are more than 2 points:

        >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"],
        ...                        freq="infer")
        >>> idx.freqstr
        '2D'

        For PeriodIndex:

        >>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M")
        >>> idx.freqstr
        'M'
        """
        if self.freq is None:
            return None
        return self.freq.freqstr

    @property  # NB: override with cache_readonly in immutable subclasses
    def inferred_freq(self) -> str | None:
        """
        Tries to return a string representing a frequency generated by infer_freq.

        Returns None if it can't autodetect the frequency.

        Examples
        --------
        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])
        >>> idx.inferred_freq
        '2D'

        For TimedeltaIndex:

        >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])
        >>> tdelta_idx
        TimedeltaIndex(['0 days', '10 days', '20 days'],
                       dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.inferred_freq
        '10D'
        """
        if self.ndim != 1:
            return None
        try:
            return frequencies.infer_freq(self)
        except ValueError:
            return None

    @property  # NB: override with cache_readonly in immutable subclasses
    def _resolution_obj(self) -> Resolution | None:
        freqstr = self.freqstr
        if freqstr is None:
            return None
        try:
            return Resolution.get_reso_from_freqstr(freqstr)
        except KeyError:
            return None

    @property  # NB: override with cache_readonly in immutable subclasses
    def resolution(self) -> str:
        """
        Returns day, hour, minute, second, millisecond or microsecond
        """
        # error: Item "None" of "Optional[Any]" has no attribute "attrname"
        return self._resolution_obj.attrname  # type: ignore[union-attr]

    # monotonicity/uniqueness properties are called via frequencies.infer_freq,
    # see GH#23789

    @property
    def _is_monotonic_increasing(self) -> bool:
        return algos.is_monotonic(self.asi8, timelike=True)[0]

    @property
    def _is_monotonic_decreasing(self) -> bool:
        return algos.is_monotonic(self.asi8, timelike=True)[1]

    @property
    def _is_unique(self) -> bool:
        return len(unique1d(self.asi8.ravel("K"))) == self.size

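    # Added note (not in the original file): the three internal properties
    # above back the public Index properties, e.g.:
    #
    # >>> pd.to_datetime(["2020-01-01", "2020-01-02"]).is_monotonic_increasing
    # True
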
    # ------------------------------------------------------------------
    # Arithmetic Methods

    def _cmp_method(self, other, op):
        if self.ndim > 1 and getattr(other, "shape", None) == self.shape:
            # TODO: handle 2D-like listlikes
            return op(self.ravel(), other.ravel()).reshape(self.shape)

        try:
            other = self._validate_comparison_value(other)
        except InvalidComparison:
            return invalid_comparison(self, other, op)

        dtype = getattr(other, "dtype", None)
        if is_object_dtype(dtype):
            # We have to use comp_method_OBJECT_ARRAY instead of numpy
            # comparison otherwise it would raise when comparing to None
            result = ops.comp_method_OBJECT_ARRAY(
                op, np.asarray(self.astype(object)), other
            )
            return result
        if other is NaT:
            if op is operator.ne:
                result = np.ones(self.shape, dtype=bool)
            else:
                result = np.zeros(self.shape, dtype=bool)
            return result

        if not isinstance(self.dtype, PeriodDtype):
            self = cast(TimelikeOps, self)
            if self._creso != other._creso:
                if not isinstance(other, type(self)):
                    # i.e. Timedelta/Timestamp, cast to ndarray and let
                    # compare_mismatched_resolutions handle broadcasting
                    try:
                        # GH#52080 see if we can losslessly cast to shared unit
                        other = other.as_unit(self.unit, round_ok=False)
                    except ValueError:
                        other_arr = np.array(other.asm8)
                        return compare_mismatched_resolutions(
                            self._ndarray, other_arr, op
                        )
                else:
                    other_arr = other._ndarray
                    return compare_mismatched_resolutions(self._ndarray, other_arr, op)

        other_vals = self._unbox(other)
        # GH#37462 comparison on i8 values is almost 2x faster than M8/m8
        result = op(self._ndarray.view("i8"), other_vals.view("i8"))

        o_mask = isna(other)
        mask = self._isnan | o_mask
        if mask.any():
            nat_result = op is operator.ne
            np.putmask(result, mask, nat_result)

        return result

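    # Added illustration of the NaT masking step above (not in the original
    # file): NaT compares False under ==, and != is its complement.
    #
    # >>> dti = pd.to_datetime(["2020-01-01", None])
    # >>> dti == pd.Timestamp("2020-01-01")
    # array([ True, False])
    # >>> dti != pd.Timestamp("2020-01-01")
    # array([False,  True])
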
    # pow is invalid for all three subclasses; TimedeltaArray will override
    # the multiplication and division ops
    __pow__ = _make_unpacked_invalid_op("__pow__")
    __rpow__ = _make_unpacked_invalid_op("__rpow__")
    __mul__ = _make_unpacked_invalid_op("__mul__")
    __rmul__ = _make_unpacked_invalid_op("__rmul__")
    __truediv__ = _make_unpacked_invalid_op("__truediv__")
    __rtruediv__ = _make_unpacked_invalid_op("__rtruediv__")
    __floordiv__ = _make_unpacked_invalid_op("__floordiv__")
    __rfloordiv__ = _make_unpacked_invalid_op("__rfloordiv__")
    __mod__ = _make_unpacked_invalid_op("__mod__")
    __rmod__ = _make_unpacked_invalid_op("__rmod__")
    __divmod__ = _make_unpacked_invalid_op("__divmod__")
    __rdivmod__ = _make_unpacked_invalid_op("__rdivmod__")

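    # Added example (not in the original file): these stubs make numeric
    # ops fail fast with the message built by make_invalid_op.
    #
    # >>> pd.to_datetime(["2020-01-01"]) * 2
    # Traceback (most recent call last):
    #   ...
    # TypeError: cannot perform __mul__ with this index type: DatetimeArray
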
    @final
    def _get_i8_values_and_mask(
        self, other
    ) -> tuple[int | npt.NDArray[np.int64], None | npt.NDArray[np.bool_]]:
        """
        Get the int64 values and b_mask to pass to add_overflowsafe.
        """
        if isinstance(other, Period):
            i8values = other.ordinal
            mask = None
        elif isinstance(other, (Timestamp, Timedelta)):
            i8values = other._value
            mask = None
        else:
            # PeriodArray, DatetimeArray, TimedeltaArray
            mask = other._isnan
            i8values = other.asi8
        return i8values, mask

    @final
    def _get_arithmetic_result_freq(self, other) -> BaseOffset | None:
        """
        Check if we can preserve self.freq in addition or subtraction.
        """
        # Adding or subtracting a Timedelta/Timestamp scalar is freq-preserving
        # whenever self.freq is a Tick
        if isinstance(self.dtype, PeriodDtype):
            return self.freq
        elif not lib.is_scalar(other):
            return None
        elif isinstance(self.freq, Tick):
            # In these cases
            return self.freq
        return None

    @final
    def _add_datetimelike_scalar(self, other) -> DatetimeArray:
        if not lib.is_np_dtype(self.dtype, "m"):
            raise TypeError(
                f"cannot add {type(self).__name__} and {type(other).__name__}"
            )

        self = cast("TimedeltaArray", self)

        from pandas.core.arrays import DatetimeArray
        from pandas.core.arrays.datetimes import tz_to_dtype

        assert other is not NaT
        if isna(other):
            # i.e. np.datetime64("NaT")
            # In this case we specifically interpret NaT as a datetime, not
            # the timedelta interpretation we would get by returning self + NaT
            result = self._ndarray + NaT.to_datetime64().astype(f"M8[{self.unit}]")
            # Preserve our resolution
            return DatetimeArray._simple_new(result, dtype=result.dtype)

        other = Timestamp(other)
        self, other = self._ensure_matching_resos(other)
        self = cast("TimedeltaArray", self)

        other_i8, o_mask = self._get_i8_values_and_mask(other)
        result = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
        res_values = result.view(f"M8[{self.unit}]")

        dtype = tz_to_dtype(tz=other.tz, unit=self.unit)
        new_freq = self._get_arithmetic_result_freq(other)
        return DatetimeArray._simple_new(res_values, dtype=dtype, freq=new_freq)

    @final
    def _add_datetime_arraylike(self, other: DatetimeArray) -> DatetimeArray:
        if not lib.is_np_dtype(self.dtype, "m"):
            raise TypeError(
                f"cannot add {type(self).__name__} and {type(other).__name__}"
            )

        # defer to DatetimeArray.__add__
        return other + self

    @final
    def _sub_datetimelike_scalar(
        self, other: datetime | np.datetime64
    ) -> TimedeltaArray:
        if self.dtype.kind != "M":
            raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")

        self = cast("DatetimeArray", self)
        # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]

        if isna(other):
            # i.e. np.datetime64("NaT")
            return self - NaT

        ts = Timestamp(other)

        self, ts = self._ensure_matching_resos(ts)
        return self._sub_datetimelike(ts)

    @final
    def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray:
        if self.dtype.kind != "M":
            raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")

        if len(self) != len(other):
            raise ValueError("cannot add indices of unequal length")

        self = cast("DatetimeArray", self)

        self, other = self._ensure_matching_resos(other)
        return self._sub_datetimelike(other)

    @final
    def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray:
        self = cast("DatetimeArray", self)

        from pandas.core.arrays import TimedeltaArray

        try:
            self._assert_tzawareness_compat(other)
        except TypeError as err:
            new_message = str(err).replace("compare", "subtract")
            raise type(err)(new_message) from err

        other_i8, o_mask = self._get_i8_values_and_mask(other)
        res_values = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8"))
        res_m8 = res_values.view(f"timedelta64[{self.unit}]")

        new_freq = self._get_arithmetic_result_freq(other)
        new_freq = cast("Tick | None", new_freq)
        return TimedeltaArray._simple_new(res_m8, dtype=res_m8.dtype, freq=new_freq)

    @final
    def _add_period(self, other: Period) -> PeriodArray:
        if not lib.is_np_dtype(self.dtype, "m"):
            raise TypeError(f"cannot add Period to a {type(self).__name__}")

        # We will wrap in a PeriodArray and defer to the reversed operation
        from pandas.core.arrays.period import PeriodArray

        i8vals = np.broadcast_to(other.ordinal, self.shape)
        dtype = PeriodDtype(other.freq)
        parr = PeriodArray(i8vals, dtype=dtype)
        return parr + self

    def _add_offset(self, offset):
        raise AbstractMethodError(self)

    def _add_timedeltalike_scalar(self, other):
        """
        Add a delta of a timedeltalike

        Returns
        -------
        Same type as self
        """
        if isna(other):
            # i.e. np.timedelta64("NaT")
            new_values = np.empty(self.shape, dtype="i8").view(self._ndarray.dtype)
            new_values.fill(iNaT)
            return type(self)._simple_new(new_values, dtype=self.dtype)

        # PeriodArray overrides, so we only get here with DTA/TDA
        self = cast("DatetimeArray | TimedeltaArray", self)
        other = Timedelta(other)
        self, other = self._ensure_matching_resos(other)
        return self._add_timedeltalike(other)

    def _add_timedelta_arraylike(self, other: TimedeltaArray):
        """
        Add a delta of a TimedeltaIndex

        Returns
        -------
        Same type as self
        """
        # overridden by PeriodArray

        if len(self) != len(other):
            raise ValueError("cannot add indices of unequal length")

        self = cast("DatetimeArray | TimedeltaArray", self)

        self, other = self._ensure_matching_resos(other)
        return self._add_timedeltalike(other)

    @final
    def _add_timedeltalike(self, other: Timedelta | TimedeltaArray):
        self = cast("DatetimeArray | TimedeltaArray", self)

        other_i8, o_mask = self._get_i8_values_and_mask(other)
        new_values = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
        res_values = new_values.view(self._ndarray.dtype)

        new_freq = self._get_arithmetic_result_freq(other)

        # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
        # incompatible type "Union[dtype[datetime64], DatetimeTZDtype,
        # dtype[timedelta64]]"; expected "Union[dtype[datetime64], DatetimeTZDtype]"
        return type(self)._simple_new(
            res_values, dtype=self.dtype, freq=new_freq  # type: ignore[arg-type]
        )

    @final
    def _add_nat(self):
        """
        Add pd.NaT to self
        """
        if isinstance(self.dtype, PeriodDtype):
            raise TypeError(
                f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
            )
        self = cast("TimedeltaArray | DatetimeArray", self)

        # GH#19124 pd.NaT is treated like a timedelta for both timedelta
        # and datetime dtypes
        result = np.empty(self.shape, dtype=np.int64)
        result.fill(iNaT)
        result = result.view(self._ndarray.dtype)  # preserve reso
        # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
        # incompatible type "Union[dtype[timedelta64], dtype[datetime64],
        # DatetimeTZDtype]"; expected "Union[dtype[datetime64], DatetimeTZDtype]"
        return type(self)._simple_new(
            result, dtype=self.dtype, freq=None  # type: ignore[arg-type]
        )

    @final
    def _sub_nat(self):
        """
        Subtract pd.NaT from self
        """
        # GH#19124 Timedelta - datetime is not in general well-defined.
        # We make an exception for pd.NaT, which in this case quacks
        # like a timedelta.
        # For datetime64 dtypes by convention we treat NaT as a datetime, so
        # this subtraction returns a timedelta64 dtype.
        # For period dtype, timedelta64 is a close-enough return dtype.
        result = np.empty(self.shape, dtype=np.int64)
        result.fill(iNaT)
        if self.dtype.kind in "mM":
            # We can retain unit in dtype
            self = cast("DatetimeArray | TimedeltaArray", self)
            return result.view(f"timedelta64[{self.unit}]")
        else:
            return result.view("timedelta64[ns]")

    @final
    def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_]:
        # If the operation is well-defined, we return an object-dtype ndarray
        # of DateOffsets. Null entries are filled with pd.NaT
        if not isinstance(self.dtype, PeriodDtype):
            raise TypeError(
                f"cannot subtract {type(other).__name__} from {type(self).__name__}"
            )

        self = cast("PeriodArray", self)
        self._check_compatible_with(other)

        other_i8, o_mask = self._get_i8_values_and_mask(other)
        new_i8_data = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype="i8"))
        new_data = np.array([self.freq.base * x for x in new_i8_data])

        if o_mask is None:
            # i.e. Period scalar
            mask = self._isnan
        else:
            # i.e. PeriodArray
            mask = self._isnan | o_mask
        new_data[mask] = NaT
        return new_data

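    # Added example of the object-dtype result built above (not in the
    # original file; the offset reprs may differ slightly across versions):
    # subtracting periods yields DateOffset multiples of freq.base.
    #
    # >>> pi = pd.period_range("2020-01", periods=3, freq="M")
    # >>> pi - pi[0]
    # Index([<0 * MonthEnds>, <MonthEnd>, <2 * MonthEnds>], dtype='object')
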
    @final
    def _addsub_object_array(self, other: npt.NDArray[np.object_], op):
        """
        Add or subtract array-like of DateOffset objects

        Parameters
        ----------
        other : np.ndarray[object]
        op : {operator.add, operator.sub}

        Returns
        -------
        np.ndarray[object]
            Except in fastpath case with length 1 where we operate on the
            contained scalar.
        """
        assert op in [operator.add, operator.sub]
        if len(other) == 1 and self.ndim == 1:
            # Note: without this special case, we could annotate return type
            # as ndarray[object]
            # If both 1D then broadcasting is unambiguous
            return op(self, other[0])

        warnings.warn(
            "Adding/subtracting object-dtype array to "
            f"{type(self).__name__} not vectorized.",
            PerformanceWarning,
            stacklevel=find_stack_level(),
        )

        # Caller is responsible for broadcasting if necessary
        assert self.shape == other.shape, (self.shape, other.shape)

        res_values = op(self.astype("O"), np.asarray(other))
        return res_values

    def _accumulate(self, name: str, *, skipna: bool = True, **kwargs) -> Self:
        if name not in {"cummin", "cummax"}:
            raise TypeError(f"Accumulation {name} not supported for {type(self)}")

        op = getattr(datetimelike_accumulations, name)
        result = op(self.copy(), skipna=skipna, **kwargs)

        return type(self)._simple_new(result, dtype=self.dtype)

    @unpack_zerodim_and_defer("__add__")
    def __add__(self, other):
        other_dtype = getattr(other, "dtype", None)
        other = ensure_wrapped_if_datetimelike(other)

        # scalar others
        if other is NaT:
            result = self._add_nat()
        elif isinstance(other, (Tick, timedelta, np.timedelta64)):
            result = self._add_timedeltalike_scalar(other)
        elif isinstance(other, BaseOffset):
            # specifically _not_ a Tick
            result = self._add_offset(other)
        elif isinstance(other, (datetime, np.datetime64)):
            result = self._add_datetimelike_scalar(other)
        elif isinstance(other, Period) and lib.is_np_dtype(self.dtype, "m"):
            result = self._add_period(other)
        elif lib.is_integer(other):
            # This check must come after the check for np.timedelta64
            # as is_integer returns True for these
            if not isinstance(self.dtype, PeriodDtype):
                raise integer_op_not_supported(self)
            obj = cast("PeriodArray", self)
            result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add)

        # array-like others
        elif lib.is_np_dtype(other_dtype, "m"):
            # TimedeltaIndex, ndarray[timedelta64]
            result = self._add_timedelta_arraylike(other)
        elif is_object_dtype(other_dtype):
            # e.g. Array/Index of DateOffset objects
            result = self._addsub_object_array(other, operator.add)
        elif lib.is_np_dtype(other_dtype, "M") or isinstance(
            other_dtype, DatetimeTZDtype
        ):
            # DatetimeIndex, ndarray[datetime64]
            return self._add_datetime_arraylike(other)
        elif is_integer_dtype(other_dtype):
            if not isinstance(self.dtype, PeriodDtype):
                raise integer_op_not_supported(self)
            obj = cast("PeriodArray", self)
            result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add)
        else:
            # Includes Categorical, other ExtensionArrays
            # For PeriodDtype, if self is a TimedeltaArray and other is a
            # PeriodArray with a timedelta-like (i.e. Tick) freq, this
            # operation is valid. Defer to the PeriodArray implementation.
            # In remaining cases, this will end up raising TypeError.
            return NotImplemented

        if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"):
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._from_sequence(result)
        return result

    def __radd__(self, other):
        # alias for __add__
        return self.__add__(other)

1422 |
+
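    # Editor's illustrative sketch (not pandas source): the __add__ dispatch
    # above from the public API side.  Timedelta-like scalars shift the
    # values; integers are only valid for PeriodDtype.
    @staticmethod
    def _demo_add_dispatch() -> None:
        import pandas as pd

        dti = pd.date_range("2020-01-01", periods=2, freq="D")
        assert (dti + pd.Timedelta(hours=1))[0] == pd.Timestamp("2020-01-01 01:00")

        pi = pd.period_range("2020-01", periods=2, freq="M")
        assert (pi + 1)[0] == pd.Period("2020-02", freq="M")

        try:
            dti + 1  # hits the integer_op_not_supported branch
        except TypeError:
            pass
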
    @unpack_zerodim_and_defer("__sub__")
    def __sub__(self, other):
        other_dtype = getattr(other, "dtype", None)
        other = ensure_wrapped_if_datetimelike(other)

        # scalar others
        if other is NaT:
            result = self._sub_nat()
        elif isinstance(other, (Tick, timedelta, np.timedelta64)):
            result = self._add_timedeltalike_scalar(-other)
        elif isinstance(other, BaseOffset):
            # specifically _not_ a Tick
            result = self._add_offset(-other)
        elif isinstance(other, (datetime, np.datetime64)):
            result = self._sub_datetimelike_scalar(other)
        elif lib.is_integer(other):
            # This check must come after the check for np.timedelta64
            # as is_integer returns True for these
            if not isinstance(self.dtype, PeriodDtype):
                raise integer_op_not_supported(self)
            obj = cast("PeriodArray", self)
            result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub)

        elif isinstance(other, Period):
            result = self._sub_periodlike(other)

        # array-like others
        elif lib.is_np_dtype(other_dtype, "m"):
            # TimedeltaIndex, ndarray[timedelta64]
            result = self._add_timedelta_arraylike(-other)
        elif is_object_dtype(other_dtype):
            # e.g. Array/Index of DateOffset objects
            result = self._addsub_object_array(other, operator.sub)
        elif lib.is_np_dtype(other_dtype, "M") or isinstance(
            other_dtype, DatetimeTZDtype
        ):
            # DatetimeIndex, ndarray[datetime64]
            result = self._sub_datetime_arraylike(other)
        elif isinstance(other_dtype, PeriodDtype):
            # PeriodIndex
            result = self._sub_periodlike(other)
        elif is_integer_dtype(other_dtype):
            if not isinstance(self.dtype, PeriodDtype):
                raise integer_op_not_supported(self)
            obj = cast("PeriodArray", self)
            result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub)
        else:
            # Includes ExtensionArrays, float_dtype
            return NotImplemented

        if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"):
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._from_sequence(result)
        return result

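    # Editor's illustrative sketch (not pandas source): two __sub__ branches
    # above.  datetime - datetime yields timedelta64; Period - Period yields
    # an object array of DateOffsets via _sub_periodlike.
    @staticmethod
    def _demo_sub_dispatch() -> None:
        import pandas as pd

        dti = pd.date_range("2020-01-01", periods=2, freq="D")
        assert (dti - pd.Timestamp("2020-01-01"))[1] == pd.Timedelta(days=1)

        pi = pd.period_range("2020-01", periods=2, freq="M")
        assert (pi - pi[0])[1] == pd.offsets.MonthEnd(1)
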
    def __rsub__(self, other):
        other_dtype = getattr(other, "dtype", None)
        other_is_dt64 = lib.is_np_dtype(other_dtype, "M") or isinstance(
            other_dtype, DatetimeTZDtype
        )

        if other_is_dt64 and lib.is_np_dtype(self.dtype, "m"):
            # ndarray[datetime64] cannot be subtracted from self, so
            # we need to wrap in DatetimeArray/Index and flip the operation
            if lib.is_scalar(other):
                # i.e. np.datetime64 object
                return Timestamp(other) - self
            if not isinstance(other, DatetimeLikeArrayMixin):
                # Avoid down-casting DatetimeIndex
                from pandas.core.arrays import DatetimeArray

                other = DatetimeArray._from_sequence(other)
            return other - self
        elif self.dtype.kind == "M" and hasattr(other, "dtype") and not other_is_dt64:
            # GH#19959 datetime - datetime is well-defined as timedelta,
            # but any other type - datetime is not well-defined.
            raise TypeError(
                f"cannot subtract {type(self).__name__} from {type(other).__name__}"
            )
        elif isinstance(self.dtype, PeriodDtype) and lib.is_np_dtype(other_dtype, "m"):
            # TODO: Can we simplify/generalize these cases at all?
            raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
        elif lib.is_np_dtype(self.dtype, "m"):
            self = cast("TimedeltaArray", self)
            return (-self) + other

        # We get here with e.g. datetime objects
        return -(self - other)

    def __iadd__(self, other) -> Self:
        result = self + other
        self[:] = result[:]

        if not isinstance(self.dtype, PeriodDtype):
            # restore freq, which is invalidated by setitem
            self._freq = result.freq
        return self

    def __isub__(self, other) -> Self:
        result = self - other
        self[:] = result[:]

        if not isinstance(self.dtype, PeriodDtype):
            # restore freq, which is invalidated by setitem
            self._freq = result.freq
        return self

    # --------------------------------------------------------------
    # Reductions

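    # Editor's illustrative sketch (not pandas source): __rsub__ above flips
    # "scalar datetime minus array" into the supported direction.
    @staticmethod
    def _demo_rsub() -> None:
        import pandas as pd

        dti = pd.date_range("2020-01-01", periods=2, freq="D")
        assert (pd.Timestamp("2020-01-03") - dti)[0] == pd.Timedelta(days=2)
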
    @_period_dispatch
    def _quantile(
        self,
        qs: npt.NDArray[np.float64],
        interpolation: str,
    ) -> Self:
        return super()._quantile(qs=qs, interpolation=interpolation)

    @_period_dispatch
    def min(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
        """
        Return the minimum value of the Array or minimum along
        an axis.

        See Also
        --------
        numpy.ndarray.min
        Index.min : Return the minimum value in an Index.
        Series.min : Return the minimum value in a Series.
        """
        nv.validate_min((), kwargs)
        nv.validate_minmax_axis(axis, self.ndim)

        result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    @_period_dispatch
    def max(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
        """
        Return the maximum value of the Array or maximum along
        an axis.

        See Also
        --------
        numpy.ndarray.max
        Index.max : Return the maximum value in an Index.
        Series.max : Return the maximum value in a Series.
        """
        nv.validate_max((), kwargs)
        nv.validate_minmax_axis(axis, self.ndim)

        result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0):
        """
        Return the mean value of the Array.

        Parameters
        ----------
        skipna : bool, default True
            Whether to ignore any NaT elements.
        axis : int, optional, default 0

        Returns
        -------
        scalar
            Timestamp or Timedelta.

        See Also
        --------
        numpy.ndarray.mean : Returns the average of array elements along a given axis.
        Series.mean : Return the mean value in a Series.

        Notes
        -----
        mean is only defined for Datetime and Timedelta dtypes, not for Period.

        Examples
        --------
        For :class:`pandas.DatetimeIndex`:

        >>> idx = pd.date_range('2001-01-01 00:00', periods=3)
        >>> idx
        DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
                      dtype='datetime64[ns]', freq='D')
        >>> idx.mean()
        Timestamp('2001-01-02 00:00:00')

        For :class:`pandas.TimedeltaIndex`:

        >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D')
        >>> tdelta_idx
        TimedeltaIndex(['1 days', '2 days', '3 days'],
                       dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.mean()
        Timedelta('2 days 00:00:00')
        """
        if isinstance(self.dtype, PeriodDtype):
            # See discussion in GH#24757
            raise TypeError(
                f"mean is not implemented for {type(self).__name__} since the "
                "meaning is ambiguous. An alternative is "
                "obj.to_timestamp(how='start').mean()"
            )

        result = nanops.nanmean(
            self._ndarray, axis=axis, skipna=skipna, mask=self.isna()
        )
        return self._wrap_reduction_result(axis, result)

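    # Editor's illustrative sketch (not pandas source): mean is defined for
    # datetime64/timedelta64 but raises for PeriodDtype, matching the guard
    # above; the suggested to_timestamp workaround is shown in the except.
    @staticmethod
    def _demo_mean_reduction() -> None:
        import pandas as pd

        dti = pd.date_range("2001-01-01", periods=3)
        assert dti.mean() == pd.Timestamp("2001-01-02")

        pi = pd.period_range("2001-01", periods=3, freq="M")
        try:
            pi.mean()
        except TypeError:
            ts_mean = pi.to_timestamp(how="start").mean()
            assert ts_mean == pd.Timestamp("2001-01-31")
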
    @_period_dispatch
    def median(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs):
        nv.validate_median((), kwargs)

        if axis is not None and abs(axis) >= self.ndim:
            raise ValueError("abs(axis) must be less than ndim")

        result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def _mode(self, dropna: bool = True):
        mask = None
        if dropna:
            mask = self.isna()

        i8modes = algorithms.mode(self.view("i8"), mask=mask)
        npmodes = i8modes.view(self._ndarray.dtype)
        npmodes = cast(np.ndarray, npmodes)
        return self._from_backing_data(npmodes)

    # ------------------------------------------------------------------
    # GroupBy Methods

    def _groupby_op(
        self,
        *,
        how: str,
        has_dropped_na: bool,
        min_count: int,
        ngroups: int,
        ids: npt.NDArray[np.intp],
        **kwargs,
    ):
        dtype = self.dtype
        if dtype.kind == "M":
            # Adding/multiplying datetimes is not valid
            if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
                raise TypeError(f"datetime64 type does not support {how} operations")
            if how in ["any", "all"]:
                # GH#34479
                warnings.warn(
                    f"'{how}' with datetime64 dtypes is deprecated and will raise in a "
                    f"future version. Use (obj != pd.Timestamp(0)).{how}() instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )

        elif isinstance(dtype, PeriodDtype):
            # Adding/multiplying Periods is not valid
            if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
                raise TypeError(f"Period type does not support {how} operations")
            if how in ["any", "all"]:
                # GH#34479
                warnings.warn(
                    f"'{how}' with PeriodDtype is deprecated and will raise in a "
                    f"future version. Use (obj != pd.Period(0, freq)).{how}() instead.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
        else:
            # timedeltas we can add but not multiply
            if how in ["prod", "cumprod", "skew", "var"]:
                raise TypeError(f"timedelta64 type does not support {how} operations")

        # All of the functions implemented here are ordinal, so we can
        # operate on the tz-naive equivalents
        npvalues = self._ndarray.view("M8[ns]")

        from pandas.core.groupby.ops import WrappedCythonOp

        kind = WrappedCythonOp.get_kind_from_how(how)
        op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)

        res_values = op._cython_op_ndim_compat(
            npvalues,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=ids,
            mask=None,
            **kwargs,
        )

        if op.how in op.cast_blocklist:
            # i.e. how in ["rank"], since other cast_blocklist methods don't go
            # through cython_operation
            return res_values

        # We did a view to M8[ns] above, now we go the other direction
        assert res_values.dtype == "M8[ns]"
        if how in ["std", "sem"]:
            from pandas.core.arrays import TimedeltaArray

            if isinstance(self.dtype, PeriodDtype):
                raise TypeError("'std' and 'sem' are not valid for PeriodDtype")
            self = cast("DatetimeArray | TimedeltaArray", self)
            new_dtype = f"m8[{self.unit}]"
            res_values = res_values.view(new_dtype)
            return TimedeltaArray._simple_new(res_values, dtype=res_values.dtype)

        res_values = res_values.view(self._ndarray.dtype)
        return self._from_backing_data(res_values)


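# Editor's illustrative sketch (not pandas source): _groupby_op above views
# the data as M8[ns] ordinals; for "std"/"sem" the result is re-wrapped as
# timedelta, which is why a datetime column's groupby std is a Timedelta.
def _demo_groupby_datetime_std() -> None:
    import pandas as pd

    df = pd.DataFrame(
        {"key": ["a", "a"], "ts": pd.to_datetime(["2020-01-01", "2020-01-03"])}
    )
    out = df.groupby("key")["ts"].std()
    assert isinstance(out.iloc[0], pd.Timedelta)

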
class DatelikeOps(DatetimeLikeArrayMixin):
    """
    Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
    """

    @Substitution(
        URL="https://docs.python.org/3/library/datetime.html"
        "#strftime-and-strptime-behavior"
    )
    def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
        """
        Convert to Index using specified date_format.

        Return an Index of formatted strings specified by date_format, which
        supports the same string format as the python standard library. Details
        of the string format can be found in `python string format
        doc <%(URL)s>`__.

        Formats supported by the C `strftime` API but not by the python string format
        doc (such as `"%%R"`, `"%%r"`) are not officially supported and should
        preferably be replaced with their supported equivalents (such as `"%%H:%%M"`,
        `"%%I:%%M:%%S %%p"`).

        Note that `PeriodIndex` supports additional directives, detailed in
        `Period.strftime`.

        Parameters
        ----------
        date_format : str
            Date format string (e.g. "%%Y-%%m-%%d").

        Returns
        -------
        ndarray[object]
            NumPy ndarray of formatted strings.

        See Also
        --------
        to_datetime : Convert the given argument to datetime.
        DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
        DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
        DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
        Timestamp.strftime : Format a single Timestamp.
        Period.strftime : Format a single Period.

        Examples
        --------
        >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
        ...                     periods=3, freq='s')
        >>> rng.strftime('%%B %%d, %%Y, %%r')
        Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
               'March 10, 2018, 09:00:02 AM'],
              dtype='object')
        """
        result = self._format_native_types(date_format=date_format, na_rep=np.nan)
        return result.astype(object, copy=False)


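# Editor's illustrative sketch (not pandas source): strftime above formats
# into an object-dtype result, mirroring the docstring example.
def _demo_strftime() -> None:
    import pandas as pd

    rng = pd.date_range("2018-03-10 09:00", periods=2, freq="s")
    assert rng.strftime("%B %d, %Y, %r")[0] == "March 10, 2018, 09:00:00 AM"

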
_round_doc = """
    Perform {op} operation on the data to the specified `freq`.

    Parameters
    ----------
    freq : str or Offset
        The frequency level to {op} the index to. Must be a fixed
        frequency like 'S' (second) not 'ME' (month end). See
        :ref:`frequency aliases <timeseries.offset_aliases>` for
        a list of possible `freq` values.
    ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
        Only relevant for DatetimeIndex:

        - 'infer' will attempt to infer fall dst-transition hours based on
          order
        - bool-ndarray where True signifies a DST time, False designates
          a non-DST time (note that this flag is only applicable for
          ambiguous times)
        - 'NaT' will return NaT where there are ambiguous times
        - 'raise' will raise an AmbiguousTimeError if there are ambiguous
          times.

    nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
        A nonexistent time does not exist in a particular timezone
        where clocks moved forward due to DST.

        - 'shift_forward' will shift the nonexistent time forward to the
          closest existing time
        - 'shift_backward' will shift the nonexistent time backward to the
          closest existing time
        - 'NaT' will return NaT where there are nonexistent times
        - timedelta objects will shift nonexistent times by the timedelta
        - 'raise' will raise a NonExistentTimeError if there are
          nonexistent times.

    Returns
    -------
    DatetimeIndex, TimedeltaIndex, or Series
        Index of the same type for a DatetimeIndex or TimedeltaIndex,
        or a Series with the same index for a Series.

    Raises
    ------
    ValueError if the `freq` cannot be converted.

    Notes
    -----
    If the timestamps have a timezone, {op}ing will take place relative to the
    local ("wall") time and re-localized to the same timezone. When {op}ing
    near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
    control the re-localization behavior.

    Examples
    --------
    **DatetimeIndex**

    >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
    >>> rng
    DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
                   '2018-01-01 12:01:00'],
                  dtype='datetime64[ns]', freq='min')
    """

_round_example = """>>> rng.round('h')
    DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                   '2018-01-01 12:00:00'],
                  dtype='datetime64[ns]', freq=None)

    **Series**

    >>> pd.Series(rng).dt.round("h")
    0   2018-01-01 12:00:00
    1   2018-01-01 12:00:00
    2   2018-01-01 12:00:00
    dtype: datetime64[ns]

    When rounding near a daylight savings time transition, use ``ambiguous`` or
    ``nonexistent`` to control how the timestamp should be re-localized.

    >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")

    >>> rng_tz.floor("2h", ambiguous=False)
    DatetimeIndex(['2021-10-31 02:00:00+01:00'],
                  dtype='datetime64[ns, Europe/Amsterdam]', freq=None)

    >>> rng_tz.floor("2h", ambiguous=True)
    DatetimeIndex(['2021-10-31 02:00:00+02:00'],
                  dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
    """

_floor_example = """>>> rng.floor('h')
    DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
                   '2018-01-01 12:00:00'],
                  dtype='datetime64[ns]', freq=None)

    **Series**

    >>> pd.Series(rng).dt.floor("h")
    0   2018-01-01 11:00:00
    1   2018-01-01 12:00:00
    2   2018-01-01 12:00:00
    dtype: datetime64[ns]

    When rounding near a daylight savings time transition, use ``ambiguous`` or
    ``nonexistent`` to control how the timestamp should be re-localized.

    >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")

    >>> rng_tz.floor("2h", ambiguous=False)
    DatetimeIndex(['2021-10-31 02:00:00+01:00'],
                  dtype='datetime64[ns, Europe/Amsterdam]', freq=None)

    >>> rng_tz.floor("2h", ambiguous=True)
    DatetimeIndex(['2021-10-31 02:00:00+02:00'],
                  dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
    """

_ceil_example = """>>> rng.ceil('h')
    DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                   '2018-01-01 13:00:00'],
                  dtype='datetime64[ns]', freq=None)

    **Series**

    >>> pd.Series(rng).dt.ceil("h")
    0   2018-01-01 12:00:00
    1   2018-01-01 12:00:00
    2   2018-01-01 13:00:00
    dtype: datetime64[ns]

    When rounding near a daylight savings time transition, use ``ambiguous`` or
    ``nonexistent`` to control how the timestamp should be re-localized.

    >>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam")

    >>> rng_tz.ceil("h", ambiguous=False)
    DatetimeIndex(['2021-10-31 02:00:00+01:00'],
                  dtype='datetime64[ns, Europe/Amsterdam]', freq=None)

    >>> rng_tz.ceil("h", ambiguous=True)
    DatetimeIndex(['2021-10-31 02:00:00+02:00'],
                  dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
    """


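# Editor's illustrative sketch (not pandas source): the ``nonexistent``
# option documented in _round_doc above, exercised on a spring-forward gap
# (2018-03-11 02:00 does not exist in US/Eastern).  The expected output is
# my assumption rather than a verified doctest.
def _demo_ceil_nonexistent() -> None:
    import pandas as pd

    idx = pd.DatetimeIndex(["2018-03-11 01:59:00"]).tz_localize("US/Eastern")
    out = idx.ceil("h", nonexistent="shift_forward")
    assert str(out[0]) == "2018-03-11 03:00:00-04:00"

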
class TimelikeOps(DatetimeLikeArrayMixin):
    """
    Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
    """

    _default_dtype: np.dtype

    def __init__(
        self, values, dtype=None, freq=lib.no_default, copy: bool = False
    ) -> None:
        warnings.warn(
            # GH#55623
            f"{type(self).__name__}.__init__ is deprecated and will be "
            "removed in a future version. Use pd.array instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        if dtype is not None:
            dtype = pandas_dtype(dtype)

        values = extract_array(values, extract_numpy=True)
        if isinstance(values, IntegerArray):
            values = values.to_numpy("int64", na_value=iNaT)

        inferred_freq = getattr(values, "_freq", None)
        explicit_none = freq is None
        freq = freq if freq is not lib.no_default else None

        if isinstance(values, type(self)):
            if explicit_none:
                # don't inherit from values
                pass
            elif freq is None:
                freq = values.freq
            elif freq and values.freq:
                freq = to_offset(freq)
                freq = _validate_inferred_freq(freq, values.freq)

            if dtype is not None and dtype != values.dtype:
                # TODO: we only have tests for this for DTA, not TDA (2022-07-01)
                raise TypeError(
                    f"dtype={dtype} does not match data dtype {values.dtype}"
                )

            dtype = values.dtype
            values = values._ndarray

        elif dtype is None:
            if isinstance(values, np.ndarray) and values.dtype.kind in "Mm":
                dtype = values.dtype
            else:
                dtype = self._default_dtype
                if isinstance(values, np.ndarray) and values.dtype == "i8":
                    values = values.view(dtype)

        if not isinstance(values, np.ndarray):
            raise ValueError(
                f"Unexpected type '{type(values).__name__}'. 'values' must be a "
                f"{type(self).__name__}, ndarray, or Series or Index "
                "containing one of those."
            )
        if values.ndim not in [1, 2]:
            raise ValueError("Only 1-dimensional input arrays are supported.")

        if values.dtype == "i8":
            # for compat with datetime/timedelta/period shared methods,
            # we can sometimes get here with int64 values. These represent
            # nanosecond UTC (or tz-naive) unix timestamps
            if dtype is None:
                dtype = self._default_dtype
                values = values.view(self._default_dtype)
            elif lib.is_np_dtype(dtype, "mM"):
                values = values.view(dtype)
            elif isinstance(dtype, DatetimeTZDtype):
                kind = self._default_dtype.kind
                new_dtype = f"{kind}8[{dtype.unit}]"
                values = values.view(new_dtype)

        dtype = self._validate_dtype(values, dtype)

        if freq == "infer":
            raise ValueError(
                f"Frequency inference not allowed in {type(self).__name__}.__init__. "
                "Use 'pd.array()' instead."
            )

        if copy:
            values = values.copy()
        if freq:
            freq = to_offset(freq)
            if values.dtype.kind == "m" and not isinstance(freq, Tick):
                raise TypeError("TimedeltaArray/Index freq must be a Tick")

        NDArrayBacked.__init__(self, values=values, dtype=dtype)
        self._freq = freq

        if inferred_freq is None and freq is not None:
            type(self)._validate_frequency(self, freq)

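    # Editor's illustrative sketch (not pandas source): per the GH#55623
    # deprecation in __init__ above, construct arrays via pd.array instead.
    @staticmethod
    def _demo_array_constructor() -> None:
        import numpy as np
        import pandas as pd

        arr = pd.array(np.array([0, 1], dtype="M8[ns]"))
        assert type(arr).__name__ == "DatetimeArray"
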
    @classmethod
    def _validate_dtype(cls, values, dtype):
        raise AbstractMethodError(cls)

    @property
    def freq(self):
        """
        Return the frequency object if it is set, otherwise None.
        """
        return self._freq

    @freq.setter
    def freq(self, value) -> None:
        if value is not None:
            value = to_offset(value)
            self._validate_frequency(self, value)
            if self.dtype.kind == "m" and not isinstance(value, Tick):
                raise TypeError("TimedeltaArray/Index freq must be a Tick")

            if self.ndim > 1:
                raise ValueError("Cannot set freq with ndim > 1")

        self._freq = value

    @final
    def _maybe_pin_freq(self, freq, validate_kwds: dict):
        """
        Constructor helper to pin the appropriate `freq` attribute. Assumes
        that self._freq is currently set to any freq inferred in
        _from_sequence_not_strict.
        """
        if freq is None:
            # user explicitly passed None -> override any inferred_freq
            self._freq = None
        elif freq == "infer":
            # if self._freq is *not* None then we already inferred a freq
            # and there is nothing left to do
            if self._freq is None:
                # Set _freq directly to bypass duplicative _validate_frequency
                # check.
                self._freq = to_offset(self.inferred_freq)
        elif freq is lib.no_default:
            # user did not specify anything, keep inferred freq if the original
            # data had one, otherwise do nothing
            pass
        elif self._freq is None:
            # We cannot inherit a freq from the data, so we need to validate
            # the user-passed freq
            freq = to_offset(freq)
            type(self)._validate_frequency(self, freq, **validate_kwds)
            self._freq = freq
        else:
            # Otherwise we just need to check that the user-passed freq
            # doesn't conflict with the one we already have.
            freq = to_offset(freq)
            _validate_inferred_freq(freq, self._freq)

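    # Editor's illustrative sketch (not pandas source): assigning to .freq
    # goes through the setter above, which re-validates against the values.
    @staticmethod
    def _demo_freq_setter() -> None:
        import pandas as pd
        from pandas.tseries.frequencies import to_offset

        dti = pd.DatetimeIndex(["2020-01-01", "2020-01-02"])
        assert dti.freq is None
        dti.freq = "D"  # validated by _validate_frequency
        assert dti.freq == to_offset("D")
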
    @final
    @classmethod
    def _validate_frequency(cls, index, freq: BaseOffset, **kwargs):
        """
        Validate that a frequency is compatible with the values of a given
        Datetime Array/Index or Timedelta Array/Index

        Parameters
        ----------
        index : DatetimeIndex or TimedeltaIndex
            The index on which to determine if the given frequency is valid
        freq : DateOffset
            The frequency to validate
        """
        inferred = index.inferred_freq
        if index.size == 0 or inferred == freq.freqstr:
            return None

        try:
            on_freq = cls._generate_range(
                start=index[0],
                end=None,
                periods=len(index),
                freq=freq,
                unit=index.unit,
                **kwargs,
            )
            if not np.array_equal(index.asi8, on_freq.asi8):
                raise ValueError
        except ValueError as err:
            if "non-fixed" in str(err):
                # non-fixed frequencies are not meaningful for timedelta64;
                # we retain that error message
                raise err
            # GH#11587 the main way this is reached is if the `np.array_equal`
            # check above is False. This can also be reached if index[0]
            # is `NaT`, in which case the call to `cls._generate_range` will
            # raise a ValueError, which we re-raise with a more targeted
            # message.
            raise ValueError(
                f"Inferred frequency {inferred} from passed values "
                f"does not conform to passed frequency {freq.freqstr}"
            ) from err

    @classmethod
    def _generate_range(
        cls, start, end, periods: int | None, freq, *args, **kwargs
    ) -> Self:
        raise AbstractMethodError(cls)

    # --------------------------------------------------------------

    @cache_readonly
    def _creso(self) -> int:
        return get_unit_from_dtype(self._ndarray.dtype)

    @cache_readonly
    def unit(self) -> str:
        # e.g. "ns", "us", "ms"
        # error: Argument 1 to "dtype_to_unit" has incompatible type
        # "ExtensionDtype"; expected "Union[DatetimeTZDtype, dtype[Any]]"
        return dtype_to_unit(self.dtype)  # type: ignore[arg-type]

    def as_unit(self, unit: str, round_ok: bool = True) -> Self:
        if unit not in ["s", "ms", "us", "ns"]:
            raise ValueError("Supported units are 's', 'ms', 'us', 'ns'")

        dtype = np.dtype(f"{self.dtype.kind}8[{unit}]")
        new_values = astype_overflowsafe(self._ndarray, dtype, round_ok=round_ok)

        if isinstance(self.dtype, np.dtype):
            new_dtype = new_values.dtype
        else:
            tz = cast("DatetimeArray", self).tz
            new_dtype = DatetimeTZDtype(tz=tz, unit=unit)

        # error: Unexpected keyword argument "freq" for "_simple_new" of
        # "NDArrayBacked" [call-arg]
        return type(self)._simple_new(
            new_values, dtype=new_dtype, freq=self.freq  # type: ignore[call-arg]
        )

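    # Editor's illustrative sketch (not pandas source): unit/as_unit above
    # from the public API; only "s", "ms", "us", "ns" are accepted.
    @staticmethod
    def _demo_as_unit() -> None:
        import pandas as pd

        dti = pd.date_range("2020-01-01", periods=2)
        assert dti.unit == "ns"
        assert dti.as_unit("s").unit == "s"
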
    # TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta
    # with the return type matching input type. TypeVar?
    def _ensure_matching_resos(self, other):
        if self._creso != other._creso:
            # Just as with Timestamp/Timedelta, we cast to the higher resolution
            if self._creso < other._creso:
                self = self.as_unit(other.unit)
            else:
                other = other.as_unit(self.unit)
        return self, other

    # --------------------------------------------------------------

    def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
        if (
            ufunc in [np.isnan, np.isinf, np.isfinite]
            and len(inputs) == 1
            and inputs[0] is self
        ):
            # numpy 1.18 changed isinf and isnan to not raise on dt64/td64
            return getattr(ufunc, method)(self._ndarray, **kwargs)

        return super().__array_ufunc__(ufunc, method, *inputs, **kwargs)

    def _round(self, freq, mode, ambiguous, nonexistent):
        # round the local times
        if isinstance(self.dtype, DatetimeTZDtype):
            # operate on naive timestamps, then convert back to aware
            self = cast("DatetimeArray", self)
            naive = self.tz_localize(None)
            result = naive._round(freq, mode, ambiguous, nonexistent)
            return result.tz_localize(
                self.tz, ambiguous=ambiguous, nonexistent=nonexistent
            )

        values = self.view("i8")
        values = cast(np.ndarray, values)
        nanos = get_unit_for_round(freq, self._creso)
        if nanos == 0:
            # GH 52761
            return self.copy()
        result_i8 = round_nsint64(values, mode, nanos)
        result = self._maybe_mask_results(result_i8, fill_value=iNaT)
        result = result.view(self._ndarray.dtype)
        return self._simple_new(result, dtype=self.dtype)

    @Appender((_round_doc + _round_example).format(op="round"))
    def round(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)

    @Appender((_round_doc + _floor_example).format(op="floor"))
    def floor(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)

    @Appender((_round_doc + _ceil_example).format(op="ceil"))
    def ceil(
        self,
        freq,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)

    # --------------------------------------------------------------
    # Reductions

    def any(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
        # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype
        return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())

    def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
        # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype

        return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())

    # --------------------------------------------------------------
    # Frequency Methods

    def _maybe_clear_freq(self) -> None:
        self._freq = None

    def _with_freq(self, freq) -> Self:
        """
        Helper to get a view on the same data, with a new freq.

        Parameters
        ----------
        freq : DateOffset, None, or "infer"

        Returns
        -------
        Same type as self
        """
        # GH#29843
        if freq is None:
            # Always valid
            pass
        elif len(self) == 0 and isinstance(freq, BaseOffset):
            # Always valid. In the TimedeltaArray case, we require a Tick offset
            if self.dtype.kind == "m" and not isinstance(freq, Tick):
                raise TypeError("TimedeltaArray/Index freq must be a Tick")
        else:
            # As an internal method, we can ensure this assertion always holds
            assert freq == "infer"
            freq = to_offset(self.inferred_freq)

        arr = self.view()
        arr._freq = freq
        return arr

    # --------------------------------------------------------------
    # ExtensionArray Interface

    def _values_for_json(self) -> np.ndarray:
        # Small performance bump vs the base class which calls np.asarray(self)
        if isinstance(self.dtype, np.dtype):
            return self._ndarray
        return super()._values_for_json()

    def factorize(
        self,
        use_na_sentinel: bool = True,
        sort: bool = False,
    ):
        if self.freq is not None:
            # We must be unique, so can short-circuit (and retain freq)
            codes = np.arange(len(self), dtype=np.intp)
            uniques = self.copy()  # TODO: copy or view?
            if sort and self.freq.n < 0:
                codes = codes[::-1]
                uniques = uniques[::-1]
            return codes, uniques

        if sort:
            # algorithms.factorize only passes sort=True here when freq is
            # not None, so this should not be reached.
            raise NotImplementedError(
                f"The 'sort' keyword in {type(self).__name__}.factorize is "
                "ignored unless arr.freq is not None. To factorize with sort, "
                "call pd.factorize(obj, sort=True) instead."
            )
        return super().factorize(use_na_sentinel=use_na_sentinel)

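    # Editor's illustrative sketch (not pandas source): with a freq set the
    # values are unique, so factorize above short-circuits and the uniques
    # retain the freq.
    @staticmethod
    def _demo_factorize_freq_fastpath() -> None:
        import pandas as pd

        dti = pd.date_range("2020-01-01", periods=3, freq="D")
        codes, uniques = dti.factorize()
        assert list(codes) == [0, 1, 2]
        assert uniques.freq == dti.freq
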
    @classmethod
    def _concat_same_type(
        cls,
        to_concat: Sequence[Self],
        axis: AxisInt = 0,
    ) -> Self:
        new_obj = super()._concat_same_type(to_concat, axis)

        obj = to_concat[0]

        if axis == 0:
            # GH 3232: If the concat result is evenly spaced, we can retain the
            # original frequency
            to_concat = [x for x in to_concat if len(x)]

            if obj.freq is not None and all(x.freq == obj.freq for x in to_concat):
                pairs = zip(to_concat[:-1], to_concat[1:])
                if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs):
                    new_freq = obj.freq
                    new_obj._freq = new_freq
        return new_obj

    def copy(self, order: str = "C") -> Self:
        new_obj = super().copy(order=order)
        new_obj._freq = self.freq
        return new_obj

    def interpolate(
        self,
        *,
        method: InterpolateOptions,
        axis: int,
        index: Index,
        limit,
        limit_direction,
        limit_area,
        copy: bool,
        **kwargs,
    ) -> Self:
        """
        See NDFrame.interpolate.__doc__.
        """
        # NB: we return type(self) even if copy=False
        if method != "linear":
            raise NotImplementedError

        if not copy:
            out_data = self._ndarray
        else:
            out_data = self._ndarray.copy()

        missing.interpolate_2d_inplace(
            out_data,
            method=method,
            axis=axis,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            **kwargs,
        )
        if not copy:
            return self
        return type(self)._simple_new(out_data, dtype=self.dtype)

    # --------------------------------------------------------------
    # Unsorted

    @property
    def _is_dates_only(self) -> bool:
        """
        Check if we are round times at midnight (and no timezone), which will
        be given a more compact __repr__ than other cases. For TimedeltaArray
        we are checking for multiples of 24H.
        """
        if not lib.is_np_dtype(self.dtype):
            # i.e. we have a timezone
            return False

        values_int = self.asi8
        consider_values = values_int != iNaT
        reso = get_unit_from_dtype(self.dtype)
        ppd = periods_per_day(reso)

        # TODO: can we reuse is_date_array_normalized? would need a skipna kwd
        # (first attempt at this was less performant than this implementation)
        even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0
        return even_days


# -------------------------------------------------------------------
# Shared Constructor Helpers

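# Editor's illustrative sketch (not pandas source): the GH#3232 behavior in
# TimelikeOps._concat_same_type above -- evenly spaced pieces keep the freq.
# The append-based round trip is my assumption of a public path into it.
def _demo_concat_freq_retention() -> None:
    import pandas as pd

    dti = pd.date_range("2020-01-01", periods=4, freq="D")
    joined = dti[:2].append(dti[2:])
    assert joined.freq == dti.freq

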
def ensure_arraylike_for_datetimelike(
    data, copy: bool, cls_name: str
) -> tuple[ArrayLike, bool]:
    if not hasattr(data, "dtype"):
        # e.g. list, tuple
        if not isinstance(data, (list, tuple)) and np.ndim(data) == 0:
            # i.e. generator
            data = list(data)

        data = construct_1d_object_array_from_listlike(data)
        copy = False
    elif isinstance(data, ABCMultiIndex):
        raise TypeError(f"Cannot create a {cls_name} from a MultiIndex.")
    else:
        data = extract_array(data, extract_numpy=True)

    if isinstance(data, IntegerArray) or (
        isinstance(data, ArrowExtensionArray) and data.dtype.kind in "iu"
    ):
        data = data.to_numpy("int64", na_value=iNaT)
        copy = False
    elif isinstance(data, ArrowExtensionArray):
        data = data._maybe_convert_datelike_array()
        data = data.to_numpy()
        copy = False
    elif not isinstance(data, (np.ndarray, ExtensionArray)):
        # GH#24539 e.g. xarray, dask object
        data = np.asarray(data)

    elif isinstance(data, ABCCategorical):
        # GH#18664 preserve tz in going DTI->Categorical->DTI
        # TODO: cases where we need to do another pass through maybe_convert_dtype,
        # e.g. the categories are timedelta64s
        data = data.categories.take(data.codes, fill_value=NaT)._values
        copy = False

    return data, copy


@overload
def validate_periods(periods: None) -> None:
    ...


@overload
def validate_periods(periods: int | float) -> int:
    ...


def validate_periods(periods: int | float | None) -> int | None:
    """
    If a `periods` argument is passed to the Datetime/Timedelta Array/Index
    constructor, cast it to an integer.

    Parameters
    ----------
    periods : None, float, int

    Returns
    -------
    periods : None or int

    Raises
    ------
    TypeError
        if periods is not None and is neither a float nor an int
    """
    if periods is not None:
        if lib.is_float(periods):
            warnings.warn(
                # GH#56036
                "Non-integer 'periods' in pd.date_range, pd.timedelta_range, "
                "pd.period_range, and pd.interval_range are deprecated and "
                "will raise in a future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            periods = int(periods)
        elif not lib.is_integer(periods):
            raise TypeError(f"periods must be a number, got {periods}")
    return periods


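# Editor's illustrative sketch (not pandas source): the GH#56036 deprecation
# in validate_periods above -- float 'periods' still works but warns.
def _demo_float_periods_deprecation() -> None:
    import warnings

    import pandas as pd

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FutureWarning)
        rng = pd.date_range("2020-01-01", periods=3.0)
    assert len(rng) == 3

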
def _validate_inferred_freq(
    freq: BaseOffset | None, inferred_freq: BaseOffset | None
) -> BaseOffset | None:
    """
    If the user passes a freq and another freq is inferred from passed data,
    require that they match.

    Parameters
    ----------
    freq : DateOffset or None
    inferred_freq : DateOffset or None

    Returns
    -------
    freq : DateOffset or None
    """
    if inferred_freq is not None:
        if freq is not None and freq != inferred_freq:
            raise ValueError(
                f"Inferred frequency {inferred_freq} from passed "
                "values does not conform to passed frequency "
                f"{freq.freqstr}"
            )
        if freq is None:
            freq = inferred_freq

    return freq


def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype | ArrowDtype) -> str:
    """
    Return the unit str corresponding to the dtype's resolution.

    Parameters
    ----------
    dtype : DatetimeTZDtype or np.dtype
        If np.dtype, we assume it is a datetime64 dtype.

    Returns
    -------
    str
    """
    if isinstance(dtype, DatetimeTZDtype):
        return dtype.unit
    elif isinstance(dtype, ArrowDtype):
        if dtype.kind not in "mM":
            raise ValueError(f"{dtype=} does not have a resolution.")
        return dtype.pyarrow_dtype.unit
    return np.datetime_data(dtype)[0]
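# Editor's illustrative sketch (not pandas source): public counterparts of
# dtype_to_unit above -- each datetime-like dtype exposes its resolution.
def _demo_dtype_to_unit() -> None:
    import numpy as np
    import pandas as pd

    assert pd.DatetimeTZDtype(tz="UTC", unit="ms").unit == "ms"
    assert np.datetime_data(np.dtype("M8[s]"))[0] == "s"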
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/datetimes.py
ADDED
@@ -0,0 +1,2820 @@
from __future__ import annotations

from datetime import (
    datetime,
    timedelta,
    tzinfo,
)
from typing import (
    TYPE_CHECKING,
    cast,
    overload,
)
import warnings

import numpy as np

from pandas._libs import (
    lib,
    tslib,
)
from pandas._libs.tslibs import (
    BaseOffset,
    NaT,
    NaTType,
    Resolution,
    Timestamp,
    astype_overflowsafe,
    fields,
    get_resolution,
    get_supported_dtype,
    get_unit_from_dtype,
    ints_to_pydatetime,
    is_date_array_normalized,
    is_supported_dtype,
    is_unitless,
    normalize_i8_timestamps,
    timezones,
    to_offset,
    tz_convert_from_utc,
    tzconversion,
)
from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit
from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_inclusive

from pandas.core.dtypes.common import (
    DT64NS_DTYPE,
    INT64_DTYPE,
    is_bool_dtype,
    is_float_dtype,
    is_string_dtype,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
    DatetimeTZDtype,
    ExtensionDtype,
    PeriodDtype,
)
from pandas.core.dtypes.missing import isna

from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com

from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
    Day,
    Tick,
)

if TYPE_CHECKING:
    from collections.abc import Iterator

    from pandas._typing import (
        ArrayLike,
        DateTimeErrorChoices,
        DtypeObj,
        IntervalClosedType,
        Self,
        TimeAmbiguous,
        TimeNonexistent,
        npt,
    )

    from pandas import DataFrame
    from pandas.core.arrays import PeriodArray


_ITER_CHUNKSIZE = 10_000


@overload
def tz_to_dtype(tz: tzinfo, unit: str = ...) -> DatetimeTZDtype:
    ...


@overload
def tz_to_dtype(tz: None, unit: str = ...) -> np.dtype[np.datetime64]:
    ...


def tz_to_dtype(
    tz: tzinfo | None, unit: str = "ns"
) -> np.dtype[np.datetime64] | DatetimeTZDtype:
    """
    Return a datetime64[ns] dtype appropriate for the given timezone.

    Parameters
    ----------
    tz : tzinfo or None
    unit : str, default "ns"

    Returns
    -------
    np.dtype or DatetimeTZDtype
    """
    if tz is None:
        return np.dtype(f"M8[{unit}]")
    else:
        return DatetimeTZDtype(tz=tz, unit=unit)

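# Usage sketch (editor-added illustration, not part of the original module):
# tz_to_dtype picks the dtype that a DatetimeArray will carry for a given tz.
#
#     >>> from datetime import timezone
#     >>> tz_to_dtype(None)
#     dtype('<M8[ns]')
#     >>> tz_to_dtype(timezone.utc, unit="s")
#     datetime64[s, UTC]
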
def _field_accessor(name: str, field: str, docstring: str | None = None):
    def f(self):
        values = self._local_timestamps()

        if field in self._bool_ops:
            result: np.ndarray

            if field.endswith(("start", "end")):
                freq = self.freq
                month_kw = 12
                if freq:
                    kwds = freq.kwds
                    month_kw = kwds.get("startingMonth", kwds.get("month", 12))

                result = fields.get_start_end_field(
                    values, field, self.freqstr, month_kw, reso=self._creso
                )
            else:
                result = fields.get_date_field(values, field, reso=self._creso)

            # these return a boolean by-definition
            return result

        if field in self._object_ops:
            result = fields.get_date_name_field(values, field, reso=self._creso)
            result = self._maybe_mask_results(result, fill_value=None)

        else:
            result = fields.get_date_field(values, field, reso=self._creso)
            result = self._maybe_mask_results(
                result, fill_value=None, convert="float64"
            )

        return result

    f.__name__ = name
    f.__doc__ = docstring
    return property(f)

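# Usage sketch (editor-added illustration, not part of the original module):
# _field_accessor is the factory behind the ``year``/``month``/... properties
# defined near the end of this file, e.g.
#
#     month = _field_accessor("month", "M", "<docstring>")
#
# which makes ``arr.month`` dispatch to fields.get_date_field at access time.
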
# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
# incompatible with definition in base class "ExtensionArray"
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):  # type: ignore[misc]
    """
    Pandas ExtensionArray for tz-naive or tz-aware datetime data.

    .. warning::

       DatetimeArray is currently experimental, and its API may change
       without warning. In particular, :attr:`DatetimeArray.dtype` is
       expected to change to always be an instance of an ``ExtensionDtype``
       subclass.

    Parameters
    ----------
    values : Series, Index, DatetimeArray, ndarray
        The datetime data.

        For DatetimeArray `values` (or a Series or Index boxing one),
        `dtype` and `freq` will be extracted from `values`.

    dtype : numpy.dtype or DatetimeTZDtype
        Note that the only NumPy dtype allowed is 'datetime64[ns]'.
    freq : str or Offset, optional
        The frequency.
    copy : bool, default False
        Whether to copy the underlying array of values.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> pd.arrays.DatetimeArray._from_sequence(
    ...     pd.DatetimeIndex(['2023-01-01', '2023-01-02'], freq='D'))
    <DatetimeArray>
    ['2023-01-01 00:00:00', '2023-01-02 00:00:00']
    Length: 2, dtype: datetime64[ns]
    """

    _typ = "datetimearray"
    _internal_fill_value = np.datetime64("NaT", "ns")
    _recognized_scalars = (datetime, np.datetime64)
    _is_recognized_dtype = lambda x: lib.is_np_dtype(x, "M") or isinstance(
        x, DatetimeTZDtype
    )
    _infer_matches = ("datetime", "datetime64", "date")

    @property
    def _scalar_type(self) -> type[Timestamp]:
        return Timestamp

    # define my properties & methods for delegation
    _bool_ops: list[str] = [
        "is_month_start",
        "is_month_end",
        "is_quarter_start",
        "is_quarter_end",
        "is_year_start",
        "is_year_end",
        "is_leap_year",
    ]
    _object_ops: list[str] = ["freq", "tz"]
    _field_ops: list[str] = [
        "year",
        "month",
        "day",
        "hour",
        "minute",
        "second",
        "weekday",
        "dayofweek",
        "day_of_week",
        "dayofyear",
        "day_of_year",
        "quarter",
        "days_in_month",
        "daysinmonth",
        "microsecond",
        "nanosecond",
    ]
    _other_ops: list[str] = ["date", "time", "timetz"]
    _datetimelike_ops: list[str] = (
        _field_ops + _object_ops + _bool_ops + _other_ops + ["unit"]
    )
    _datetimelike_methods: list[str] = [
        "to_period",
        "tz_localize",
        "tz_convert",
        "normalize",
        "strftime",
        "round",
        "floor",
        "ceil",
        "month_name",
        "day_name",
        "as_unit",
    ]

    # ndim is inherited from ExtensionArray, must exist to ensure
    # Timestamp.__richcmp__(DateTimeArray) operates pointwise

    # ensure that operations with numpy arrays defer to our implementation
    __array_priority__ = 1000

    # -----------------------------------------------------------------
    # Constructors

    _dtype: np.dtype[np.datetime64] | DatetimeTZDtype
    _freq: BaseOffset | None = None
    _default_dtype = DT64NS_DTYPE  # used in TimeLikeOps.__init__

    @classmethod
    def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self:
        if lib.infer_dtype(scalars, skipna=True) not in ["datetime", "datetime64"]:
            # TODO: require any NAs be valid-for-DTA
            # TODO: if dtype is passed, check for tzawareness compat?
            raise ValueError
        return cls._from_sequence(scalars, dtype=dtype)

    @classmethod
    def _validate_dtype(cls, values, dtype):
        # used in TimeLikeOps.__init__
        dtype = _validate_dt64_dtype(dtype)
        _validate_dt64_dtype(values.dtype)
        if isinstance(dtype, np.dtype):
            if values.dtype != dtype:
                raise ValueError("Values resolution does not match dtype.")
        else:
            vunit = np.datetime_data(values.dtype)[0]
            if vunit != dtype.unit:
                raise ValueError("Values resolution does not match dtype.")
        return dtype

    # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
    @classmethod
    def _simple_new(  # type: ignore[override]
        cls,
        values: npt.NDArray[np.datetime64],
        freq: BaseOffset | None = None,
        dtype: np.dtype[np.datetime64] | DatetimeTZDtype = DT64NS_DTYPE,
    ) -> Self:
        assert isinstance(values, np.ndarray)
        assert dtype.kind == "M"
        if isinstance(dtype, np.dtype):
            assert dtype == values.dtype
            assert not is_unitless(dtype)
        else:
            # DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC],
            # then values.dtype should be M8[us].
            assert dtype._creso == get_unit_from_dtype(values.dtype)

        result = super()._simple_new(values, dtype)
        result._freq = freq
        return result

    @classmethod
    def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
        return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)

    @classmethod
    def _from_sequence_not_strict(
        cls,
        data,
        *,
        dtype=None,
        copy: bool = False,
        tz=lib.no_default,
        freq: str | BaseOffset | lib.NoDefault | None = lib.no_default,
        dayfirst: bool = False,
        yearfirst: bool = False,
        ambiguous: TimeAmbiguous = "raise",
    ) -> Self:
        """
        A non-strict version of _from_sequence, called from DatetimeIndex.__new__.
        """

        # if the user either explicitly passes tz=None or a tz-naive dtype, we
        # disallow inferring a tz.
        explicit_tz_none = tz is None
        if tz is lib.no_default:
            tz = None
        else:
            tz = timezones.maybe_get_tz(tz)

        dtype = _validate_dt64_dtype(dtype)
        # if dtype has an embedded tz, capture it
        tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none)

        unit = None
        if dtype is not None:
            unit = dtl.dtype_to_unit(dtype)

        data, copy = dtl.ensure_arraylike_for_datetimelike(
            data, copy, cls_name="DatetimeArray"
        )
        inferred_freq = None
        if isinstance(data, DatetimeArray):
            inferred_freq = data.freq

        subarr, tz = _sequence_to_dt64(
            data,
            copy=copy,
            tz=tz,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            ambiguous=ambiguous,
            out_unit=unit,
        )
        # We have to call this again after possibly inferring a tz above
        _validate_tz_from_dtype(dtype, tz, explicit_tz_none)
        if tz is not None and explicit_tz_none:
            raise ValueError(
                "Passed data is timezone-aware, incompatible with 'tz=None'. "
                "Use obj.tz_localize(None) instead."
            )

        data_unit = np.datetime_data(subarr.dtype)[0]
        data_dtype = tz_to_dtype(tz, data_unit)
        result = cls._simple_new(subarr, freq=inferred_freq, dtype=data_dtype)
        if unit is not None and unit != result.unit:
            # If unit was specified in user-passed dtype, cast to it here
            result = result.as_unit(unit)

        validate_kwds = {"ambiguous": ambiguous}
        result._maybe_pin_freq(freq, validate_kwds)
        return result

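    # Usage sketch (editor-added illustration, not part of the original
    # module): this is the entry point used by DatetimeIndex.__new__, which is
    # why the tz/dtype handling above makes the following two spellings agree:
    #
    #     >>> pd.DatetimeIndex(["2020-01-01"], tz="UTC")
    #     >>> pd.DatetimeIndex(["2020-01-01"], dtype="datetime64[ns, UTC]")
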
    @classmethod
    def _generate_range(
        cls,
        start,
        end,
        periods: int | None,
        freq,
        tz=None,
        normalize: bool = False,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
        inclusive: IntervalClosedType = "both",
        *,
        unit: str | None = None,
    ) -> Self:
        periods = dtl.validate_periods(periods)
        if freq is None and any(x is None for x in [periods, start, end]):
            raise ValueError("Must provide freq argument if no data is supplied")

        if com.count_not_none(start, end, periods, freq) != 3:
            raise ValueError(
                "Of the four parameters: start, end, periods, "
                "and freq, exactly three must be specified"
            )
        freq = to_offset(freq)

        if start is not None:
            start = Timestamp(start)

        if end is not None:
            end = Timestamp(end)

        if start is NaT or end is NaT:
            raise ValueError("Neither `start` nor `end` can be NaT")

        if unit is not None:
            if unit not in ["s", "ms", "us", "ns"]:
                raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'")
        else:
            unit = "ns"

        if start is not None:
            start = start.as_unit(unit, round_ok=False)
        if end is not None:
            end = end.as_unit(unit, round_ok=False)

        left_inclusive, right_inclusive = validate_inclusive(inclusive)
        start, end = _maybe_normalize_endpoints(start, end, normalize)
        tz = _infer_tz_from_endpoints(start, end, tz)

        if tz is not None:
            # Localize the start and end arguments
            start = _maybe_localize_point(start, freq, tz, ambiguous, nonexistent)
            end = _maybe_localize_point(end, freq, tz, ambiguous, nonexistent)

        if freq is not None:
            # We break Day arithmetic (fixed 24 hour) here and opt for
            # Day to mean calendar day (23/24/25 hour). Therefore, strip
            # tz info from start and end to avoid DST arithmetic
            if isinstance(freq, Day):
                if start is not None:
                    start = start.tz_localize(None)
                if end is not None:
                    end = end.tz_localize(None)

            if isinstance(freq, Tick):
                i8values = generate_regular_range(start, end, periods, freq, unit=unit)
            else:
                xdr = _generate_range(
                    start=start, end=end, periods=periods, offset=freq, unit=unit
                )
                i8values = np.array([x._value for x in xdr], dtype=np.int64)

            endpoint_tz = start.tz if start is not None else end.tz

            if tz is not None and endpoint_tz is None:
                if not timezones.is_utc(tz):
                    # short-circuit tz_localize_to_utc which would make
                    # an unnecessary copy with UTC but be a no-op.
                    creso = abbrev_to_npy_unit(unit)
                    i8values = tzconversion.tz_localize_to_utc(
                        i8values,
                        tz,
                        ambiguous=ambiguous,
                        nonexistent=nonexistent,
                        creso=creso,
                    )

                # i8values is localized datetime64 array -> have to convert
                # start/end as well to compare
                if start is not None:
                    start = start.tz_localize(tz, ambiguous, nonexistent)
                if end is not None:
                    end = end.tz_localize(tz, ambiguous, nonexistent)
        else:
            # Create a linearly spaced date_range in local time
            # Nanosecond-granularity timestamps aren't always correctly
            # representable with doubles, so we limit the range that we
            # pass to np.linspace as much as possible
            periods = cast(int, periods)
            i8values = (
                np.linspace(0, end._value - start._value, periods, dtype="int64")
                + start._value
            )
            if i8values.dtype != "i8":
                # 2022-01-09 I (brock) am not sure if it is possible for this
                # to overflow and cast to e.g. f8, but if it does we need to cast
                i8values = i8values.astype("i8")

        if start == end:
            if not left_inclusive and not right_inclusive:
                i8values = i8values[1:-1]
        else:
            start_i8 = Timestamp(start)._value
            end_i8 = Timestamp(end)._value
            if not left_inclusive or not right_inclusive:
                if not left_inclusive and len(i8values) and i8values[0] == start_i8:
                    i8values = i8values[1:]
                if not right_inclusive and len(i8values) and i8values[-1] == end_i8:
                    i8values = i8values[:-1]

        dt64_values = i8values.view(f"datetime64[{unit}]")
        dtype = tz_to_dtype(tz, unit=unit)
        return cls._simple_new(dt64_values, freq=freq, dtype=dtype)

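    # Usage sketch (editor-added illustration, not part of the original
    # module): pd.date_range funnels into _generate_range, so the
    # three-of-four rule above is the one users see:
    #
    #     >>> pd.date_range(start="2020-01-01", periods=3, freq="D")
    #     DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03'],
    #                   dtype='datetime64[ns]', freq='D')
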
    # -----------------------------------------------------------------
    # DatetimeLike Interface

    def _unbox_scalar(self, value) -> np.datetime64:
        if not isinstance(value, self._scalar_type) and value is not NaT:
            raise ValueError("'value' should be a Timestamp.")
        self._check_compatible_with(value)
        if value is NaT:
            return np.datetime64(value._value, self.unit)
        else:
            return value.as_unit(self.unit).asm8

    def _scalar_from_string(self, value) -> Timestamp | NaTType:
        return Timestamp(value, tz=self.tz)

    def _check_compatible_with(self, other) -> None:
        if other is NaT:
            return
        self._assert_tzawareness_compat(other)

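    # Note (editor-added illustration, not part of the original module):
    # _unbox_scalar round-trips a Timestamp into the array's physical unit,
    # e.g. for a "datetime64[s]" array it returns
    # Timestamp("2020-01-01").as_unit("s").asm8, a numpy.datetime64 in seconds.
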
    # -----------------------------------------------------------------
    # Descriptive Properties

    def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
        # GH#42228
        value = x.view("i8")
        ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz)
        return ts

    @property
    # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
    # incompatible with return type "ExtensionDtype" in supertype
    # "ExtensionArray"
    def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype:  # type: ignore[override]
        """
        The dtype for the DatetimeArray.

        .. warning::

           A future version of pandas will change dtype to never be a
           ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
           always be an instance of an ``ExtensionDtype`` subclass.

        Returns
        -------
        numpy.dtype or DatetimeTZDtype
            If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
            is returned.

            If the values are tz-aware, then the ``DatetimeTZDtype``
            is returned.
        """
        return self._dtype

    @property
    def tz(self) -> tzinfo | None:
        """
        Return the timezone.

        Returns
        -------
        datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
            Returns None when the array is tz-naive.

        Examples
        --------
        For Series:

        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[ns, UTC]
        >>> s.dt.tz
        datetime.timezone.utc

        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
        ...                         "2/1/2020 11:00:00+00:00"])
        >>> idx.tz
        datetime.timezone.utc
        """
        # GH 18595
        return getattr(self.dtype, "tz", None)

    @tz.setter
    def tz(self, value):
        # GH 3746: Prevent localizing or converting the index by setting tz
        raise AttributeError(
            "Cannot directly set timezone. Use tz_localize() "
            "or tz_convert() as appropriate"
        )

    @property
    def tzinfo(self) -> tzinfo | None:
        """
        Alias for tz attribute
        """
        return self.tz

    @property  # NB: override with cache_readonly in immutable subclasses
    def is_normalized(self) -> bool:
        """
        Returns True if all of the dates are at midnight ("no time")
        """
        return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)

    @property  # NB: override with cache_readonly in immutable subclasses
    def _resolution_obj(self) -> Resolution:
        return get_resolution(self.asi8, self.tz, reso=self._creso)

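    # Usage sketch (editor-added illustration, not part of the original module):
    #
    #     >>> pd.date_range("2020-01-01", periods=2).array.is_normalized
    #     True
    #     >>> pd.date_range("2020-01-01 09:00", periods=2).array.is_normalized
    #     False
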
    # ----------------------------------------------------------------
    # Array-Like / EA-Interface Methods

    def __array__(self, dtype=None, copy=None) -> np.ndarray:
        if dtype is None and self.tz:
            # The default for tz-aware is object, to preserve tz info
            dtype = object

        return super().__array__(dtype=dtype, copy=copy)

    def __iter__(self) -> Iterator:
        """
        Return an iterator over the boxed values

        Yields
        ------
        tstamp : Timestamp
        """
        if self.ndim > 1:
            for i in range(len(self)):
                yield self[i]
        else:
            # convert in chunks of 10k for efficiency
            data = self.asi8
            length = len(self)
            chunksize = _ITER_CHUNKSIZE
            chunks = (length // chunksize) + 1

            for i in range(chunks):
                start_i = i * chunksize
                end_i = min((i + 1) * chunksize, length)
                converted = ints_to_pydatetime(
                    data[start_i:end_i],
                    tz=self.tz,
                    box="timestamp",
                    reso=self._creso,
                )
                yield from converted

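    # Note (editor-added illustration, not part of the original module):
    # iteration boxes the underlying i8 values into Timestamps one
    # _ITER_CHUNKSIZE-sized chunk at a time, bounding the number of Python
    # objects alive at once:
    #
    #     >>> [ts.day for ts in pd.array(pd.date_range("2020-01-01", periods=3))]
    #     [1, 2, 3]
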
    def astype(self, dtype, copy: bool = True):
        # We handle
        #   --> datetime
        #   --> period
        # DatetimeLikeArrayMixin Super handles the rest.
        dtype = pandas_dtype(dtype)

        if dtype == self.dtype:
            if copy:
                return self.copy()
            return self

        elif isinstance(dtype, ExtensionDtype):
            if not isinstance(dtype, DatetimeTZDtype):
                # e.g. Sparse[datetime64[ns]]
                return super().astype(dtype, copy=copy)
            elif self.tz is None:
                # pre-2.0 this did self.tz_localize(dtype.tz), which did not match
                #  the Series behavior which did
                #  values.tz_localize("UTC").tz_convert(dtype.tz)
                raise TypeError(
                    "Cannot use .astype to convert from timezone-naive dtype to "
                    "timezone-aware dtype. Use obj.tz_localize or "
                    "series.dt.tz_localize instead"
                )
            else:
                # tzaware unit conversion e.g. datetime64[s, UTC]
                np_dtype = np.dtype(dtype.str)
                res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy)
                return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq)

        elif (
            self.tz is None
            and lib.is_np_dtype(dtype, "M")
            and not is_unitless(dtype)
            and is_supported_dtype(dtype)
        ):
            # unit conversion e.g. datetime64[s]
            res_values = astype_overflowsafe(self._ndarray, dtype, copy=True)
            return type(self)._simple_new(res_values, dtype=res_values.dtype)
            # TODO: preserve freq?

        elif self.tz is not None and lib.is_np_dtype(dtype, "M"):
            # pre-2.0 behavior for DTA/DTI was
            #  values.tz_convert("UTC").tz_localize(None), which did not match
            #  the Series behavior
            raise TypeError(
                "Cannot use .astype to convert from timezone-aware dtype to "
                "timezone-naive dtype. Use obj.tz_localize(None) or "
                "obj.tz_convert('UTC').tz_localize(None) instead."
            )

        elif (
            self.tz is None
            and lib.is_np_dtype(dtype, "M")
            and dtype != self.dtype
            and is_unitless(dtype)
        ):
            raise TypeError(
                "Casting to unit-less dtype 'datetime64' is not supported. "
                "Pass e.g. 'datetime64[ns]' instead."
            )

        elif isinstance(dtype, PeriodDtype):
            return self.to_period(freq=dtype.freq)
        return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)

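    # Usage sketch (editor-added illustration, not part of the original
    # module): the supported casts above include unit conversion and Period:
    #
    #     >>> arr = pd.array(pd.date_range("2020-01-01", periods=2))
    #     >>> arr.astype("datetime64[s]").dtype
    #     dtype('<M8[s]')
    #     >>> arr.astype("period[D]").dtype
    #     period[D]
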
    # -----------------------------------------------------------------
    # Rendering Methods

    def _format_native_types(
        self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
    ) -> npt.NDArray[np.object_]:
        if date_format is None and self._is_dates_only:
            # Only dates and no timezone: provide a default format
            date_format = "%Y-%m-%d"

        return tslib.format_array_from_datetime(
            self.asi8, tz=self.tz, format=date_format, na_rep=na_rep, reso=self._creso
        )

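    # Note (editor-added illustration, not part of the original module): this
    # is the internal rendering hook; the public analogue for custom formats
    # is strftime:
    #
    #     >>> pd.date_range("2020-01-01", periods=2).strftime("%Y/%m/%d")
    #     Index(['2020/01/01', '2020/01/02'], dtype='object')
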
    # -----------------------------------------------------------------
    # Comparison Methods

    def _has_same_tz(self, other) -> bool:
        # vzone shouldn't be None if value is non-datetime like
        if isinstance(other, np.datetime64):
            # convert to Timestamp as np.datetime64 doesn't have tz attr
            other = Timestamp(other)

        if not hasattr(other, "tzinfo"):
            return False
        other_tz = other.tzinfo
        return timezones.tz_compare(self.tzinfo, other_tz)

    def _assert_tzawareness_compat(self, other) -> None:
        # adapted from _Timestamp._assert_tzawareness_compat
        other_tz = getattr(other, "tzinfo", None)
        other_dtype = getattr(other, "dtype", None)

        if isinstance(other_dtype, DatetimeTZDtype):
            # Get tzinfo from Series dtype
            other_tz = other.dtype.tz
        if other is NaT:
            # pd.NaT quacks both aware and naive
            pass
        elif self.tz is None:
            if other_tz is not None:
                raise TypeError(
                    "Cannot compare tz-naive and tz-aware datetime-like objects."
                )
        elif other_tz is None:
            raise TypeError(
                "Cannot compare tz-naive and tz-aware datetime-like objects"
            )

    # -----------------------------------------------------------------
    # Arithmetic Methods

    def _add_offset(self, offset: BaseOffset) -> Self:
        assert not isinstance(offset, Tick)

        if self.tz is not None:
            values = self.tz_localize(None)
        else:
            values = self

        try:
            res_values = offset._apply_array(values._ndarray)
            if res_values.dtype.kind == "i":
                # error: Argument 1 to "view" of "ndarray" has incompatible type
                # "dtype[datetime64] | DatetimeTZDtype"; expected
                # "dtype[Any] | type[Any] | _SupportsDType[dtype[Any]]"
                res_values = res_values.view(values.dtype)  # type: ignore[arg-type]
        except NotImplementedError:
            warnings.warn(
                "Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
                PerformanceWarning,
                stacklevel=find_stack_level(),
            )
            res_values = self.astype("O") + offset
            # TODO(GH#55564): as_unit will be unnecessary
            result = type(self)._from_sequence(res_values).as_unit(self.unit)
            if not len(self):
                # GH#30336 _from_sequence won't be able to infer self.tz
                return result.tz_localize(self.tz)

        else:
            result = type(self)._simple_new(res_values, dtype=res_values.dtype)
            if offset.normalize:
                result = result.normalize()
                result._freq = None

            if self.tz is not None:
                result = result.tz_localize(self.tz)

        return result

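    # Usage sketch (editor-added illustration, not part of the original
    # module): _add_offset is the path taken for non-Tick offsets:
    #
    #     >>> pd.date_range("2020-01-15", periods=2, freq="D") + pd.offsets.MonthEnd()
    #     DatetimeIndex(['2020-01-31', '2020-01-31'], dtype='datetime64[ns]', freq=None)
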
    # -----------------------------------------------------------------
    # Timezone Conversion and Localization Methods

    def _local_timestamps(self) -> npt.NDArray[np.int64]:
        """
        Convert to an i8 (unix-like nanosecond timestamp) representation
        while keeping the local timezone and not using UTC.
        This is used to calculate time-of-day information as if the timestamps
        were timezone-naive.
        """
        if self.tz is None or timezones.is_utc(self.tz):
            # Avoid the copy that would be made in tzconversion
            return self.asi8
        return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)

    def tz_convert(self, tz) -> Self:
        """
        Convert tz-aware Datetime Array/Index from one time zone to another.

        Parameters
        ----------
        tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
            Time zone for time. Corresponding timestamps would be converted
            to this time zone of the Datetime Array/Index. A `tz` of None will
            convert to UTC and remove the timezone information.

        Returns
        -------
        Array or Index

        Raises
        ------
        TypeError
            If Datetime Array/Index is tz-naive.

        See Also
        --------
        DatetimeIndex.tz : A timezone that has a variable offset from UTC.
        DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
            given time zone, or remove timezone from a tz-aware DatetimeIndex.

        Examples
        --------
        With the `tz` parameter, we can change the DatetimeIndex
        to other time zones:

        >>> dti = pd.date_range(start='2014-08-01 09:00',
        ...                     freq='h', periods=3, tz='Europe/Berlin')

        >>> dti
        DatetimeIndex(['2014-08-01 09:00:00+02:00',
                       '2014-08-01 10:00:00+02:00',
                       '2014-08-01 11:00:00+02:00'],
                      dtype='datetime64[ns, Europe/Berlin]', freq='h')

        >>> dti.tz_convert('US/Central')
        DatetimeIndex(['2014-08-01 02:00:00-05:00',
                       '2014-08-01 03:00:00-05:00',
                       '2014-08-01 04:00:00-05:00'],
                      dtype='datetime64[ns, US/Central]', freq='h')

        With the ``tz=None``, we can remove the timezone (after converting
        to UTC if necessary):

        >>> dti = pd.date_range(start='2014-08-01 09:00', freq='h',
        ...                     periods=3, tz='Europe/Berlin')

        >>> dti
        DatetimeIndex(['2014-08-01 09:00:00+02:00',
                       '2014-08-01 10:00:00+02:00',
                       '2014-08-01 11:00:00+02:00'],
                      dtype='datetime64[ns, Europe/Berlin]', freq='h')

        >>> dti.tz_convert(None)
        DatetimeIndex(['2014-08-01 07:00:00',
                       '2014-08-01 08:00:00',
                       '2014-08-01 09:00:00'],
                      dtype='datetime64[ns]', freq='h')
        """
        tz = timezones.maybe_get_tz(tz)

        if self.tz is None:
            # tz naive, use tz_localize
            raise TypeError(
                "Cannot convert tz-naive timestamps, use tz_localize to localize"
            )

        # No conversion since timestamps are all UTC to begin with
        dtype = tz_to_dtype(tz, unit=self.unit)
        return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)

    @dtl.ravel_compat
    def tz_localize(
        self,
        tz,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        """
        Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index.

        This method takes a time zone (tz) naive Datetime Array/Index object
        and makes this time zone aware. It does not move the time to another
        time zone.

        This method can also be used to do the inverse -- to create a time
        zone unaware object from an aware object. To that end, pass `tz=None`.

        Parameters
        ----------
        tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
            Time zone to convert timestamps to. Passing ``None`` will
            remove the time zone information preserving local time.
        ambiguous : 'infer', 'NaT', bool array, default 'raise'
            When clocks moved backward due to DST, ambiguous times may arise.
            For example in Central European Time (UTC+01), when going from
            03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
            00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
            `ambiguous` parameter dictates how ambiguous times should be
            handled.

            - 'infer' will attempt to infer fall dst-transition hours based on
              order
            - bool-ndarray where True signifies a DST time, False signifies a
              non-DST time (note that this flag is only applicable for
              ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous
              times.

        nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
            A nonexistent time does not exist in a particular timezone
            where clocks moved forward due to DST.

            - 'shift_forward' will shift the nonexistent time forward to the
              closest existing time
            - 'shift_backward' will shift the nonexistent time backward to the
              closest existing time
            - 'NaT' will return NaT where there are nonexistent times
            - timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise a NonExistentTimeError if there are
              nonexistent times.

        Returns
        -------
        Same type as self
            Array/Index converted to the specified time zone.

        Raises
        ------
        TypeError
            If the Datetime Array/Index is tz-aware and tz is not None.

        See Also
        --------
        DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
            one time zone to another.

        Examples
        --------
        >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
        >>> tz_naive
        DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
                       '2018-03-03 09:00:00'],
                      dtype='datetime64[ns]', freq='D')

        Localize DatetimeIndex in US/Eastern time zone:

        >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
        >>> tz_aware
        DatetimeIndex(['2018-03-01 09:00:00-05:00',
                       '2018-03-02 09:00:00-05:00',
                       '2018-03-03 09:00:00-05:00'],
                      dtype='datetime64[ns, US/Eastern]', freq=None)

        With the ``tz=None``, we can remove the time zone information
        while keeping the local time (not converted to UTC):

        >>> tz_aware.tz_localize(None)
        DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
                       '2018-03-03 09:00:00'],
                      dtype='datetime64[ns]', freq=None)

        Be careful with DST changes. When there is sequential data, pandas can
        infer the DST time:

        >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
        ...                               '2018-10-28 02:00:00',
        ...                               '2018-10-28 02:30:00',
        ...                               '2018-10-28 02:00:00',
        ...                               '2018-10-28 02:30:00',
        ...                               '2018-10-28 03:00:00',
        ...                               '2018-10-28 03:30:00']))
        >>> s.dt.tz_localize('CET', ambiguous='infer')
        0   2018-10-28 01:30:00+02:00
        1   2018-10-28 02:00:00+02:00
        2   2018-10-28 02:30:00+02:00
        3   2018-10-28 02:00:00+01:00
        4   2018-10-28 02:30:00+01:00
        5   2018-10-28 03:00:00+01:00
        6   2018-10-28 03:30:00+01:00
        dtype: datetime64[ns, CET]

        In some cases, inferring the DST is impossible. In such cases, you can
        pass an ndarray to the ambiguous parameter to set the DST explicitly

        >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
        ...                               '2018-10-28 02:36:00',
        ...                               '2018-10-28 03:46:00']))
        >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
        0   2018-10-28 01:20:00+02:00
        1   2018-10-28 02:36:00+02:00
        2   2018-10-28 03:46:00+01:00
        dtype: datetime64[ns, CET]

        If the DST transition causes nonexistent times, you can shift these
        dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backward'`.

        >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
        ...                               '2015-03-29 03:30:00']))
        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
        0   2015-03-29 03:00:00+02:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]

        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
        0   2015-03-29 01:59:59.999999999+01:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]

        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1h'))
        0   2015-03-29 03:30:00+02:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]
        """
        nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
        if nonexistent not in nonexistent_options and not isinstance(
            nonexistent, timedelta
        ):
            raise ValueError(
                "The nonexistent argument must be one of 'raise', "
                "'NaT', 'shift_forward', 'shift_backward' or "
                "a timedelta object"
            )

        if self.tz is not None:
            if tz is None:
                new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
            else:
                raise TypeError("Already tz-aware, use tz_convert to convert.")
        else:
            tz = timezones.maybe_get_tz(tz)
            # Convert to UTC

            new_dates = tzconversion.tz_localize_to_utc(
                self.asi8,
                tz,
                ambiguous=ambiguous,
                nonexistent=nonexistent,
                creso=self._creso,
            )
        new_dates_dt64 = new_dates.view(f"M8[{self.unit}]")
        dtype = tz_to_dtype(tz, unit=self.unit)

        freq = None
        if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates_dt64[0])):
            # we can preserve freq
            # TODO: Also for fixed-offsets
            freq = self.freq
        elif tz is None and self.tz is None:
            # no-op
            freq = self.freq
        return self._simple_new(new_dates_dt64, dtype=dtype, freq=freq)

    # ----------------------------------------------------------------
    # Conversion Methods - Vectorized analogues of Timestamp methods

    def to_pydatetime(self) -> npt.NDArray[np.object_]:
        """
        Return an ndarray of ``datetime.datetime`` objects.

        Returns
        -------
        numpy.ndarray

        Examples
        --------
        >>> idx = pd.date_range('2018-02-27', periods=3)
        >>> idx.to_pydatetime()
        array([datetime.datetime(2018, 2, 27, 0, 0),
               datetime.datetime(2018, 2, 28, 0, 0),
               datetime.datetime(2018, 3, 1, 0, 0)], dtype=object)
        """
        return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)

    def normalize(self) -> Self:
        """
        Convert times to midnight.

        The time component of the date-time is converted to midnight i.e.
        00:00:00. This is useful in cases when the time does not matter.
        Length is unaltered. The timezones are unaffected.

        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on Datetime Array/Index.

        Returns
        -------
        DatetimeArray, DatetimeIndex or Series
            The same type as the original data. Series will have the same
            name and index. DatetimeIndex will have the same name.

        See Also
        --------
        floor : Floor the datetimes to the specified freq.
        ceil : Ceil the datetimes to the specified freq.
        round : Round the datetimes to the specified freq.

        Examples
        --------
        >>> idx = pd.date_range(start='2014-08-01 10:00', freq='h',
        ...                     periods=3, tz='Asia/Calcutta')
        >>> idx
        DatetimeIndex(['2014-08-01 10:00:00+05:30',
                       '2014-08-01 11:00:00+05:30',
                       '2014-08-01 12:00:00+05:30'],
                      dtype='datetime64[ns, Asia/Calcutta]', freq='h')
        >>> idx.normalize()
        DatetimeIndex(['2014-08-01 00:00:00+05:30',
                       '2014-08-01 00:00:00+05:30',
                       '2014-08-01 00:00:00+05:30'],
                      dtype='datetime64[ns, Asia/Calcutta]', freq=None)
        """
        new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso)
        dt64_values = new_values.view(self._ndarray.dtype)

        dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype)
        dta = dta._with_freq("infer")
        if self.tz is not None:
            dta = dta.tz_localize(self.tz)
        return dta

    def to_period(self, freq=None) -> PeriodArray:
        """
        Cast to PeriodArray/PeriodIndex at a particular frequency.

        Converts DatetimeArray/Index to PeriodArray/PeriodIndex.

        Parameters
        ----------
        freq : str or Period, optional
            One of pandas' :ref:`period aliases <timeseries.period_aliases>`
            or a Period object. Will be inferred by default.

        Returns
        -------
        PeriodArray/PeriodIndex

        Raises
        ------
        ValueError
            When converting a DatetimeArray/Index with non-regular values,
            so that a frequency cannot be inferred.

        See Also
        --------
        PeriodIndex: Immutable ndarray holding ordinal values.
        DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.

        Examples
        --------
        >>> df = pd.DataFrame({"y": [1, 2, 3]},
        ...                   index=pd.to_datetime(["2000-03-31 00:00:00",
        ...                                         "2000-05-31 00:00:00",
        ...                                         "2000-08-31 00:00:00"]))
        >>> df.index.to_period("M")
        PeriodIndex(['2000-03', '2000-05', '2000-08'],
                    dtype='period[M]')

        Infer the daily frequency

        >>> idx = pd.date_range("2017-01-01", periods=2)
        >>> idx.to_period()
        PeriodIndex(['2017-01-01', '2017-01-02'],
                    dtype='period[D]')
        """
        from pandas.core.arrays import PeriodArray

        if self.tz is not None:
            warnings.warn(
                "Converting to PeriodArray/Index representation "
                "will drop timezone information.",
                UserWarning,
                stacklevel=find_stack_level(),
            )

        if freq is None:
            freq = self.freqstr or self.inferred_freq
            if isinstance(self.freq, BaseOffset) and hasattr(
                self.freq, "_period_dtype_code"
            ):
                freq = PeriodDtype(self.freq)._freqstr

            if freq is None:
                raise ValueError(
                    "You must pass a freq argument as current index has none."
                )

            res = get_period_alias(freq)

            # https://github.com/pandas-dev/pandas/issues/33358
            if res is None:
                res = freq

            freq = res
        return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)

1251 |
+
# -----------------------------------------------------------------
|
1252 |
+
# Properties - Vectorized Timestamp Properties/Methods
|
1253 |
+
|
1254 |
+
def month_name(self, locale=None) -> npt.NDArray[np.object_]:
|
1255 |
+
"""
|
1256 |
+
Return the month names with specified locale.
|
1257 |
+
|
1258 |
+
Parameters
|
1259 |
+
----------
|
1260 |
+
locale : str, optional
|
1261 |
+
Locale determining the language in which to return the month name.
|
1262 |
+
Default is English locale (``'en_US.utf8'``). Use the command
|
1263 |
+
``locale -a`` on your terminal on Unix systems to find your locale
|
1264 |
+
language code.
|
1265 |
+
|
1266 |
+
Returns
|
1267 |
+
-------
|
1268 |
+
Series or Index
|
1269 |
+
Series or Index of month names.
|
1270 |
+
|
1271 |
+
Examples
|
1272 |
+
--------
|
1273 |
+
>>> s = pd.Series(pd.date_range(start='2018-01', freq='ME', periods=3))
|
1274 |
+
>>> s
|
1275 |
+
0 2018-01-31
|
1276 |
+
1 2018-02-28
|
1277 |
+
2 2018-03-31
|
1278 |
+
dtype: datetime64[ns]
|
1279 |
+
>>> s.dt.month_name()
|
1280 |
+
0 January
|
1281 |
+
1 February
|
1282 |
+
2 March
|
1283 |
+
dtype: object
|
1284 |
+
|
1285 |
+
>>> idx = pd.date_range(start='2018-01', freq='ME', periods=3)
|
1286 |
+
>>> idx
|
1287 |
+
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
|
1288 |
+
dtype='datetime64[ns]', freq='ME')
|
1289 |
+
>>> idx.month_name()
|
1290 |
+
Index(['January', 'February', 'March'], dtype='object')
|
1291 |
+
|
1292 |
+
Using the ``locale`` parameter you can set a different locale language,
|
1293 |
+
for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month
|
1294 |
+
names in Brazilian Portuguese language.
|
1295 |
+
|
1296 |
+
>>> idx = pd.date_range(start='2018-01', freq='ME', periods=3)
|
1297 |
+
>>> idx
|
1298 |
+
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
|
1299 |
+
dtype='datetime64[ns]', freq='ME')
|
1300 |
+
>>> idx.month_name(locale='pt_BR.utf8') # doctest: +SKIP
|
1301 |
+
Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object')
|
1302 |
+
"""
|
1303 |
+
values = self._local_timestamps()
|
1304 |
+
|
1305 |
+
result = fields.get_date_name_field(
|
1306 |
+
values, "month_name", locale=locale, reso=self._creso
|
1307 |
+
)
|
1308 |
+
result = self._maybe_mask_results(result, fill_value=None)
|
1309 |
+
return result
|
1310 |
+
|
1311 |
+
def day_name(self, locale=None) -> npt.NDArray[np.object_]:
|
1312 |
+
"""
|
1313 |
+
Return the day names with specified locale.
|
1314 |
+
|
1315 |
+
Parameters
|
1316 |
+
----------
|
1317 |
+
locale : str, optional
|
1318 |
+
Locale determining the language in which to return the day name.
|
1319 |
+
Default is English locale (``'en_US.utf8'``). Use the command
|
1320 |
+
``locale -a`` on your terminal on Unix systems to find your locale
|
1321 |
+
language code.
|
1322 |
+
|
1323 |
+
Returns
|
1324 |
+
-------
|
1325 |
+
Series or Index
|
1326 |
+
Series or Index of day names.
|
1327 |
+
|
1328 |
+
Examples
|
1329 |
+
--------
|
1330 |
+
>>> s = pd.Series(pd.date_range(start='2018-01-01', freq='D', periods=3))
|
1331 |
+
>>> s
|
1332 |
+
0 2018-01-01
|
1333 |
+
1 2018-01-02
|
1334 |
+
2 2018-01-03
|
1335 |
+
dtype: datetime64[ns]
|
1336 |
+
>>> s.dt.day_name()
|
1337 |
+
0 Monday
|
1338 |
+
1 Tuesday
|
1339 |
+
2 Wednesday
|
1340 |
+
dtype: object
|
1341 |
+
|
1342 |
+
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
|
1343 |
+
>>> idx
|
1344 |
+
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
|
1345 |
+
dtype='datetime64[ns]', freq='D')
|
1346 |
+
>>> idx.day_name()
|
1347 |
+
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
|
1348 |
+
|
1349 |
+
Using the ``locale`` parameter you can set a different locale language,
|
1350 |
+
for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day
|
1351 |
+
names in Brazilian Portuguese language.
|
1352 |
+
|
1353 |
+
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
|
1354 |
+
>>> idx
|
1355 |
+
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
|
1356 |
+
dtype='datetime64[ns]', freq='D')
|
1357 |
+
>>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP
|
1358 |
+
Index(['Segunda', 'Terça', 'Quarta'], dtype='object')
|
1359 |
+
"""
|
1360 |
+
values = self._local_timestamps()
|
1361 |
+
|
1362 |
+
result = fields.get_date_name_field(
|
1363 |
+
values, "day_name", locale=locale, reso=self._creso
|
1364 |
+
)
|
1365 |
+
result = self._maybe_mask_results(result, fill_value=None)
|
1366 |
+
return result
|
1367 |
+
|
1368 |
+
@property
|
1369 |
+
def time(self) -> npt.NDArray[np.object_]:
|
1370 |
+
"""
|
1371 |
+
Returns numpy array of :class:`datetime.time` objects.
|
1372 |
+
|
1373 |
+
The time part of the Timestamps.
|
1374 |
+
|
1375 |
+
Examples
|
1376 |
+
--------
|
1377 |
+
For Series:
|
1378 |
+
|
1379 |
+
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
|
1380 |
+
>>> s = pd.to_datetime(s)
|
1381 |
+
>>> s
|
1382 |
+
0 2020-01-01 10:00:00+00:00
|
1383 |
+
1 2020-02-01 11:00:00+00:00
|
1384 |
+
dtype: datetime64[ns, UTC]
|
1385 |
+
>>> s.dt.time
|
1386 |
+
0 10:00:00
|
1387 |
+
1 11:00:00
|
1388 |
+
dtype: object
|
1389 |
+
|
1390 |
+
For DatetimeIndex:
|
1391 |
+
|
1392 |
+
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
|
1393 |
+
... "2/1/2020 11:00:00+00:00"])
|
1394 |
+
>>> idx.time
|
1395 |
+
array([datetime.time(10, 0), datetime.time(11, 0)], dtype=object)
|
1396 |
+
"""
|
1397 |
+
# If the Timestamps have a timezone that is not UTC,
|
1398 |
+
# convert them into their i8 representation while
|
1399 |
+
# keeping their timezone and not using UTC
|
1400 |
+
timestamps = self._local_timestamps()
|
1401 |
+
|
1402 |
+
return ints_to_pydatetime(timestamps, box="time", reso=self._creso)
|
1403 |
+
|
1404 |
+
@property
|
1405 |
+
def timetz(self) -> npt.NDArray[np.object_]:
|
1406 |
+
"""
|
1407 |
+
Returns numpy array of :class:`datetime.time` objects with timezones.
|
1408 |
+
|
1409 |
+
The time part of the Timestamps.
|
1410 |
+
|
1411 |
+
Examples
|
1412 |
+
--------
|
1413 |
+
For Series:
|
1414 |
+
|
1415 |
+
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
|
1416 |
+
>>> s = pd.to_datetime(s)
|
1417 |
+
>>> s
|
1418 |
+
0 2020-01-01 10:00:00+00:00
|
1419 |
+
1 2020-02-01 11:00:00+00:00
|
1420 |
+
dtype: datetime64[ns, UTC]
|
1421 |
+
>>> s.dt.timetz
|
1422 |
+
0 10:00:00+00:00
|
1423 |
+
1 11:00:00+00:00
|
1424 |
+
dtype: object
|
1425 |
+
|
1426 |
+
For DatetimeIndex:
|
1427 |
+
|
1428 |
+
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
|
1429 |
+
... "2/1/2020 11:00:00+00:00"])
|
1430 |
+
>>> idx.timetz
|
1431 |
+
array([datetime.time(10, 0, tzinfo=datetime.timezone.utc),
|
1432 |
+
datetime.time(11, 0, tzinfo=datetime.timezone.utc)], dtype=object)
|
1433 |
+
"""
|
1434 |
+
return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso)
|
1435 |
+
|
1436 |
+
@property
|
1437 |
+
def date(self) -> npt.NDArray[np.object_]:
|
1438 |
+
"""
|
1439 |
+
Returns numpy array of python :class:`datetime.date` objects.
|
1440 |
+
|
1441 |
+
Namely, the date part of Timestamps without time and
|
1442 |
+
timezone information.
|
1443 |
+
|
1444 |
+
Examples
|
1445 |
+
--------
|
1446 |
+
For Series:
|
1447 |
+
|
1448 |
+
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
|
1449 |
+
>>> s = pd.to_datetime(s)
|
1450 |
+
>>> s
|
1451 |
+
0 2020-01-01 10:00:00+00:00
|
1452 |
+
1 2020-02-01 11:00:00+00:00
|
1453 |
+
dtype: datetime64[ns, UTC]
|
1454 |
+
>>> s.dt.date
|
1455 |
+
0 2020-01-01
|
1456 |
+
1 2020-02-01
|
1457 |
+
dtype: object
|
1458 |
+
|
1459 |
+
For DatetimeIndex:
|
1460 |
+
|
1461 |
+
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
|
1462 |
+
... "2/1/2020 11:00:00+00:00"])
|
1463 |
+
>>> idx.date
|
1464 |
+
array([datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)], dtype=object)
|
1465 |
+
"""
|
1466 |
+
# If the Timestamps have a timezone that is not UTC,
|
1467 |
+
# convert them into their i8 representation while
|
1468 |
+
# keeping their timezone and not using UTC
|
1469 |
+
timestamps = self._local_timestamps()
|
1470 |
+
|
1471 |
+
return ints_to_pydatetime(timestamps, box="date", reso=self._creso)
|
1472 |
+
|
1473 |
+
def isocalendar(self) -> DataFrame:
|
1474 |
+
"""
|
1475 |
+
Calculate year, week, and day according to the ISO 8601 standard.
|
1476 |
+
|
1477 |
+
Returns
|
1478 |
+
-------
|
1479 |
+
DataFrame
|
1480 |
+
With columns year, week and day.
|
1481 |
+
|
1482 |
+
See Also
|
1483 |
+
--------
|
1484 |
+
Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
|
1485 |
+
week number, and weekday for the given Timestamp object.
|
1486 |
+
datetime.date.isocalendar : Return a named tuple object with
|
1487 |
+
three components: year, week and weekday.
|
1488 |
+
|
1489 |
+
Examples
|
1490 |
+
--------
|
1491 |
+
>>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
|
1492 |
+
>>> idx.isocalendar()
|
1493 |
+
year week day
|
1494 |
+
2019-12-29 2019 52 7
|
1495 |
+
2019-12-30 2020 1 1
|
1496 |
+
2019-12-31 2020 1 2
|
1497 |
+
2020-01-01 2020 1 3
|
1498 |
+
>>> idx.isocalendar().week
|
1499 |
+
2019-12-29 52
|
1500 |
+
2019-12-30 1
|
1501 |
+
2019-12-31 1
|
1502 |
+
2020-01-01 1
|
1503 |
+
Freq: D, Name: week, dtype: UInt32
|
1504 |
+
"""
|
1505 |
+
from pandas import DataFrame
|
1506 |
+
|
1507 |
+
values = self._local_timestamps()
|
1508 |
+
sarray = fields.build_isocalendar_sarray(values, reso=self._creso)
|
1509 |
+
iso_calendar_df = DataFrame(
|
1510 |
+
sarray, columns=["year", "week", "day"], dtype="UInt32"
|
1511 |
+
)
|
1512 |
+
if self._hasna:
|
1513 |
+
iso_calendar_df.iloc[self._isnan] = None
|
1514 |
+
return iso_calendar_df
|

    year = _field_accessor(
        "year",
        "Y",
        """
        The year of the datetime.

        Examples
        --------
        >>> datetime_series = pd.Series(
        ...     pd.date_range("2000-01-01", periods=3, freq="YE")
        ... )
        >>> datetime_series
        0   2000-12-31
        1   2001-12-31
        2   2002-12-31
        dtype: datetime64[ns]
        >>> datetime_series.dt.year
        0    2000
        1    2001
        2    2002
        dtype: int32
        """,
    )
    month = _field_accessor(
        "month",
        "M",
        """
        The month as January=1, December=12.

        Examples
        --------
        >>> datetime_series = pd.Series(
        ...     pd.date_range("2000-01-01", periods=3, freq="ME")
        ... )
        >>> datetime_series
        0   2000-01-31
        1   2000-02-29
        2   2000-03-31
        dtype: datetime64[ns]
        >>> datetime_series.dt.month
        0    1
        1    2
        2    3
        dtype: int32
        """,
    )
    day = _field_accessor(
        "day",
        "D",
        """
        The day of the datetime.

        Examples
        --------
        >>> datetime_series = pd.Series(
        ...     pd.date_range("2000-01-01", periods=3, freq="D")
        ... )
        >>> datetime_series
        0   2000-01-01
        1   2000-01-02
        2   2000-01-03
        dtype: datetime64[ns]
        >>> datetime_series.dt.day
        0    1
        1    2
        2    3
        dtype: int32
        """,
    )
    hour = _field_accessor(
        "hour",
        "h",
        """
        The hours of the datetime.

        Examples
        --------
        >>> datetime_series = pd.Series(
        ...     pd.date_range("2000-01-01", periods=3, freq="h")
        ... )
        >>> datetime_series
        0   2000-01-01 00:00:00
        1   2000-01-01 01:00:00
        2   2000-01-01 02:00:00
        dtype: datetime64[ns]
        >>> datetime_series.dt.hour
        0    0
        1    1
        2    2
        dtype: int32
        """,
    )
    minute = _field_accessor(
        "minute",
        "m",
        """
        The minutes of the datetime.

        Examples
        --------
        >>> datetime_series = pd.Series(
        ...     pd.date_range("2000-01-01", periods=3, freq="min")
        ... )
        >>> datetime_series
        0   2000-01-01 00:00:00
        1   2000-01-01 00:01:00
        2   2000-01-01 00:02:00
        dtype: datetime64[ns]
        >>> datetime_series.dt.minute
        0    0
        1    1
        2    2
        dtype: int32
        """,
    )
    second = _field_accessor(
        "second",
        "s",
        """
        The seconds of the datetime.

        Examples
        --------
        >>> datetime_series = pd.Series(
        ...     pd.date_range("2000-01-01", periods=3, freq="s")
        ... )
        >>> datetime_series
        0   2000-01-01 00:00:00
        1   2000-01-01 00:00:01
        2   2000-01-01 00:00:02
        dtype: datetime64[ns]
        >>> datetime_series.dt.second
        0    0
        1    1
        2    2
        dtype: int32
        """,
    )
    microsecond = _field_accessor(
        "microsecond",
        "us",
        """
        The microseconds of the datetime.

        Examples
        --------
        >>> datetime_series = pd.Series(
        ...     pd.date_range("2000-01-01", periods=3, freq="us")
        ... )
        >>> datetime_series
        0   2000-01-01 00:00:00.000000
        1   2000-01-01 00:00:00.000001
        2   2000-01-01 00:00:00.000002
        dtype: datetime64[ns]
        >>> datetime_series.dt.microsecond
        0    0
        1    1
        2    2
        dtype: int32
        """,
    )
    nanosecond = _field_accessor(
        "nanosecond",
        "ns",
        """
        The nanoseconds of the datetime.

        Examples
        --------
        >>> datetime_series = pd.Series(
        ...     pd.date_range("2000-01-01", periods=3, freq="ns")
        ... )
        >>> datetime_series
        0   2000-01-01 00:00:00.000000000
        1   2000-01-01 00:00:00.000000001
        2   2000-01-01 00:00:00.000000002
        dtype: datetime64[ns]
        >>> datetime_series.dt.nanosecond
        0    0
        1    1
        2    2
        dtype: int32
        """,
    )
    _dayofweek_doc = """
    The day of the week with Monday=0, Sunday=6.

    Return the day of the week. It is assumed the week starts on
    Monday, which is denoted by 0 and ends on Sunday which is denoted
    by 6. This method is available on both Series with datetime
    values (using the `dt` accessor) or DatetimeIndex.

    Returns
    -------
    Series or Index
        Containing integers indicating the day number.

    See Also
    --------
    Series.dt.dayofweek : Alias.
    Series.dt.weekday : Alias.
    Series.dt.day_name : Returns the name of the day of the week.

    Examples
    --------
    >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
    >>> s.dt.dayofweek
    2016-12-31    5
    2017-01-01    6
    2017-01-02    0
    2017-01-03    1
    2017-01-04    2
    2017-01-05    3
    2017-01-06    4
    2017-01-07    5
    2017-01-08    6
    Freq: D, dtype: int32
    """
    day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
    dayofweek = day_of_week
    weekday = day_of_week

    day_of_year = _field_accessor(
        "dayofyear",
        "doy",
        """
        The ordinal day of the year.

        Examples
        --------
        For Series:

        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[ns, UTC]
        >>> s.dt.dayofyear
        0     1
        1    32
        dtype: int32

        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
        ...                         "2/1/2020 11:00:00+00:00"])
        >>> idx.dayofyear
        Index([1, 32], dtype='int32')
        """,
    )
    dayofyear = day_of_year
    quarter = _field_accessor(
        "quarter",
        "q",
        """
        The quarter of the date.

        Examples
        --------
        For Series:

        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "4/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-04-01 11:00:00+00:00
        dtype: datetime64[ns, UTC]
        >>> s.dt.quarter
        0    1
        1    2
        dtype: int32

        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
        ...                         "2/1/2020 11:00:00+00:00"])
        >>> idx.quarter
        Index([1, 1], dtype='int32')
        """,
    )
    days_in_month = _field_accessor(
        "days_in_month",
        "dim",
        """
        The number of days in the month.

        Examples
        --------
        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[ns, UTC]
        >>> s.dt.daysinmonth
        0    31
        1    29
        dtype: int32
        """,
    )
    daysinmonth = days_in_month
    _is_month_doc = """
        Indicates whether the date is the {first_or_last} day of the month.

        Returns
        -------
        Series or array
            For Series, returns a Series with boolean values.
            For DatetimeIndex, returns a boolean array.

        See Also
        --------
        is_month_start : Return a boolean indicating whether the date
            is the first day of the month.
        is_month_end : Return a boolean indicating whether the date
            is the last day of the month.

        Examples
        --------
        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on DatetimeIndex.

        >>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
        >>> s
        0   2018-02-27
        1   2018-02-28
        2   2018-03-01
        dtype: datetime64[ns]
        >>> s.dt.is_month_start
        0    False
        1    False
        2    True
        dtype: bool
        >>> s.dt.is_month_end
        0    False
        1    True
        2    False
        dtype: bool

        >>> idx = pd.date_range("2018-02-27", periods=3)
        >>> idx.is_month_start
        array([False, False, True])
        >>> idx.is_month_end
        array([False, True, False])
        """
    is_month_start = _field_accessor(
        "is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
    )

    is_month_end = _field_accessor(
        "is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
    )

    is_quarter_start = _field_accessor(
        "is_quarter_start",
        "is_quarter_start",
        """
        Indicator for whether the date is the first day of a quarter.

        Returns
        -------
        is_quarter_start : Series or DatetimeIndex
            The same type as the original data with boolean values. Series will
            have the same name and index. DatetimeIndex will have the same
            name.

        See Also
        --------
        quarter : Return the quarter of the date.
        is_quarter_end : Similar property for indicating the quarter end.

        Examples
        --------
        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on DatetimeIndex.

        >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
        ...                    periods=4)})
        >>> df.assign(quarter=df.dates.dt.quarter,
        ...           is_quarter_start=df.dates.dt.is_quarter_start)
               dates  quarter  is_quarter_start
        0 2017-03-30        1             False
        1 2017-03-31        1             False
        2 2017-04-01        2              True
        3 2017-04-02        2             False

        >>> idx = pd.date_range('2017-03-30', periods=4)
        >>> idx
        DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
                      dtype='datetime64[ns]', freq='D')

        >>> idx.is_quarter_start
        array([False, False,  True, False])
        """,
    )
    is_quarter_end = _field_accessor(
        "is_quarter_end",
        "is_quarter_end",
        """
        Indicator for whether the date is the last day of a quarter.

        Returns
        -------
        is_quarter_end : Series or DatetimeIndex
            The same type as the original data with boolean values. Series will
            have the same name and index. DatetimeIndex will have the same
            name.

        See Also
        --------
        quarter : Return the quarter of the date.
        is_quarter_start : Similar property indicating the quarter start.

        Examples
        --------
        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on DatetimeIndex.

        >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
        ...                    periods=4)})
        >>> df.assign(quarter=df.dates.dt.quarter,
        ...           is_quarter_end=df.dates.dt.is_quarter_end)
               dates  quarter  is_quarter_end
        0 2017-03-30        1           False
        1 2017-03-31        1            True
        2 2017-04-01        2           False
        3 2017-04-02        2           False

        >>> idx = pd.date_range('2017-03-30', periods=4)
        >>> idx
        DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
                      dtype='datetime64[ns]', freq='D')

        >>> idx.is_quarter_end
        array([False,  True, False, False])
        """,
    )
    is_year_start = _field_accessor(
        "is_year_start",
        "is_year_start",
        """
        Indicate whether the date is the first day of a year.

        Returns
        -------
        Series or DatetimeIndex
            The same type as the original data with boolean values. Series will
            have the same name and index. DatetimeIndex will have the same
            name.

        See Also
        --------
        is_year_end : Similar property indicating the last day of the year.

        Examples
        --------
        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on DatetimeIndex.

        >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
        >>> dates
        0   2017-12-30
        1   2017-12-31
        2   2018-01-01
        dtype: datetime64[ns]

        >>> dates.dt.is_year_start
        0    False
        1    False
        2    True
        dtype: bool

        >>> idx = pd.date_range("2017-12-30", periods=3)
        >>> idx
        DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
                      dtype='datetime64[ns]', freq='D')

        >>> idx.is_year_start
        array([False, False,  True])
        """,
    )
    is_year_end = _field_accessor(
        "is_year_end",
        "is_year_end",
        """
        Indicate whether the date is the last day of the year.

        Returns
        -------
        Series or DatetimeIndex
            The same type as the original data with boolean values. Series will
            have the same name and index. DatetimeIndex will have the same
            name.

        See Also
        --------
        is_year_start : Similar property indicating the start of the year.

        Examples
        --------
        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on DatetimeIndex.

        >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
        >>> dates
        0   2017-12-30
        1   2017-12-31
        2   2018-01-01
        dtype: datetime64[ns]

        >>> dates.dt.is_year_end
        0    False
        1     True
        2    False
        dtype: bool

        >>> idx = pd.date_range("2017-12-30", periods=3)
        >>> idx
        DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
                      dtype='datetime64[ns]', freq='D')

        >>> idx.is_year_end
        array([False,  True, False])
        """,
    )
    is_leap_year = _field_accessor(
        "is_leap_year",
        "is_leap_year",
        """
        Boolean indicator if the date belongs to a leap year.

        A leap year is a year which has 366 days (instead of 365) including
        29th of February as an intercalary day.
        Leap years are years which are multiples of four with the exception
        of years divisible by 100 but not by 400.

        Returns
        -------
        Series or ndarray
            Booleans indicating if dates belong to a leap year.

        Examples
        --------
        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on DatetimeIndex.

        >>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="YE")
        >>> idx
        DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
                      dtype='datetime64[ns]', freq='YE-DEC')
        >>> idx.is_leap_year
        array([ True, False, False])

        >>> dates_series = pd.Series(idx)
        >>> dates_series
        0   2012-12-31
        1   2013-12-31
        2   2014-12-31
        dtype: datetime64[ns]
        >>> dates_series.dt.is_leap_year
        0     True
        1    False
        2    False
        dtype: bool
        """,
    )

    def to_julian_date(self) -> npt.NDArray[np.float64]:
        """
        Convert Datetime Array to float64 ndarray of Julian Dates.
        Julian date 0 is noon on January 1, 4713 BC.
        https://en.wikipedia.org/wiki/Julian_day
        """

        # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
        year = np.asarray(self.year)
        month = np.asarray(self.month)
        day = np.asarray(self.day)
        testarr = month < 3
        year[testarr] -= 1
        month[testarr] += 12
        return (
            day
            + np.fix((153 * month - 457) / 5)
            + 365 * year
            + np.floor(year / 4)
            - np.floor(year / 100)
            + np.floor(year / 400)
            + 1_721_118.5
            + (
                self.hour
                + self.minute / 60
                + self.second / 3600
                + self.microsecond / 3600 / 10**6
                + self.nanosecond / 3600 / 10**9
            )
            / 24
        )
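    # The arithmetic above is easiest to sanity-check against a known epoch:
    # the Julian date of 2000-01-01 00:00 UTC is 2451544.5. A minimal,
    # self-contained sketch (illustrative only, not part of pandas; a scalar
    # re-implementation of the same algorithm):
    #
    #     import numpy as np
    #
    #     def _julian_date(year: int, month: int, day: int) -> float:
    #         if month < 3:  # treat Jan/Feb as months 13/14 of the prior year
    #             year -= 1
    #             month += 12
    #         return (
    #             day
    #             + np.fix((153 * month - 457) / 5)
    #             + 365 * year
    #             + np.floor(year / 4)
    #             - np.floor(year / 100)
    #             + np.floor(year / 400)
    #             + 1_721_118.5
    #         )
    #
    #     assert _julian_date(2000, 1, 1) == 2451544.5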

    # -----------------------------------------------------------------
    # Reductions

    def std(
        self,
        axis=None,
        dtype=None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        """
        Return sample standard deviation over requested axis.

        Normalized by `N-1` by default. This can be changed using ``ddof``.

        Parameters
        ----------
        axis : int, optional
            Axis for the function to be applied on. For :class:`pandas.Series`
            this parameter is unused and defaults to ``None``.
        ddof : int, default 1
            Degrees of Freedom. The divisor used in calculations is `N - ddof`,
            where `N` represents the number of elements.
        skipna : bool, default True
            Exclude NA/null values. If an entire row/column is ``NA``, the result
            will be ``NA``.

        Returns
        -------
        Timedelta

        See Also
        --------
        numpy.ndarray.std : Returns the standard deviation of the array elements
            along given axis.
        Series.std : Return sample standard deviation over requested axis.

        Examples
        --------
        For :class:`pandas.DatetimeIndex`:

        >>> idx = pd.date_range('2001-01-01 00:00', periods=3)
        >>> idx
        DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
                      dtype='datetime64[ns]', freq='D')
        >>> idx.std()
        Timedelta('1 days 00:00:00')
        """
        # Because std is translation-invariant, we can get self.std
        # by calculating (self - Timestamp(0)).std, and we can do it
        # without creating a copy by using a view on self._ndarray
        from pandas.core.arrays import TimedeltaArray

        # Find the td64 dtype with the same resolution as our dt64 dtype
        dtype_str = self._ndarray.dtype.name.replace("datetime64", "timedelta64")
        dtype = np.dtype(dtype_str)

        tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype)

        return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna)
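    # A quick way to convince yourself of the translation-invariance trick
    # used above (a sketch, assuming a standard pandas install; not part of
    # this module):
    #
    #     import pandas as pd
    #
    #     idx = pd.date_range("2001-01-01", periods=3)
    #     delta = idx - pd.Timestamp(0)      # same spread, now timedeltas
    #     assert idx.std() == delta.std()    # std is unchanged by the shift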


# -------------------------------------------------------------------
# Constructor Helpers


def _sequence_to_dt64(
    data: ArrayLike,
    *,
    copy: bool = False,
    tz: tzinfo | None = None,
    dayfirst: bool = False,
    yearfirst: bool = False,
    ambiguous: TimeAmbiguous = "raise",
    out_unit: str | None = None,
):
    """
    Parameters
    ----------
    data : np.ndarray or ExtensionArray
        dtl.ensure_arraylike_for_datetimelike has already been called.
    copy : bool, default False
    tz : tzinfo or None, default None
    dayfirst : bool, default False
    yearfirst : bool, default False
    ambiguous : str, bool, or arraylike, default 'raise'
        See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
    out_unit : str or None, default None
        Desired output resolution.

    Returns
    -------
    result : numpy.ndarray
        The sequence converted to a numpy array with dtype ``datetime64[unit]``,
        where `unit` is "ns" unless specified otherwise by `out_unit`.
    tz : tzinfo or None
        Either the user-provided tzinfo or one inferred from the data.

    Raises
    ------
    TypeError : PeriodDtype data is passed
    """
    # By this point we are assured to have either a numpy array or Index
    data, copy = maybe_convert_dtype(data, copy, tz=tz)
    data_dtype = getattr(data, "dtype", None)

    if out_unit is None:
        out_unit = "ns"
    out_dtype = np.dtype(f"M8[{out_unit}]")

    if data_dtype == object or is_string_dtype(data_dtype):
        # TODO: We do not have tests specific to string-dtypes,
        #  also complex or categorical or other extension
        data = cast(np.ndarray, data)
        copy = False
        if lib.infer_dtype(data, skipna=False) == "integer":
            # Much more performant than going through array_to_datetime
            data = data.astype(np.int64)
        elif tz is not None and ambiguous == "raise":
            obj_data = np.asarray(data, dtype=object)
            result = tslib.array_to_datetime_with_tz(
                obj_data,
                tz=tz,
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                creso=abbrev_to_npy_unit(out_unit),
            )
            return result, tz
        else:
            converted, inferred_tz = objects_to_datetime64(
                data,
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                allow_object=False,
                out_unit=out_unit or "ns",
            )
            copy = False
            if tz and inferred_tz:
                # two timezones: convert to intended from base UTC repr
                # GH#42505 by convention, these are _already_ UTC
                result = converted

            elif inferred_tz:
                tz = inferred_tz
                result = converted

            else:
                result, _ = _construct_from_dt64_naive(
                    converted, tz=tz, copy=copy, ambiguous=ambiguous
                )
            return result, tz

    data_dtype = data.dtype

    # `data` may have originally been a Categorical[datetime64[ns, tz]],
    # so we need to handle these types.
    if isinstance(data_dtype, DatetimeTZDtype):
        # DatetimeArray -> ndarray
        data = cast(DatetimeArray, data)
        tz = _maybe_infer_tz(tz, data.tz)
        result = data._ndarray

    elif lib.is_np_dtype(data_dtype, "M"):
        # tz-naive DatetimeArray or ndarray[datetime64]
        if isinstance(data, DatetimeArray):
            data = data._ndarray

        data = cast(np.ndarray, data)
        result, copy = _construct_from_dt64_naive(
            data, tz=tz, copy=copy, ambiguous=ambiguous
        )

    else:
        # must be integer dtype otherwise
        # assume this data are epoch timestamps
        if data.dtype != INT64_DTYPE:
            data = data.astype(np.int64, copy=False)
            copy = False
        data = cast(np.ndarray, data)
        result = data.view(out_dtype)

    if copy:
        result = result.copy()

    assert isinstance(result, np.ndarray), type(result)
    assert result.dtype.kind == "M"
    assert result.dtype != "M8"
    assert is_supported_dtype(result.dtype)
    return result, tz
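# Dispatch summary for _sequence_to_dt64, for orientation (descriptive
# comment only; the branches are the ones implemented above):
#   object / string dtype -> parse via array_to_datetime(_with_tz), or a fast
#       astype(np.int64) path when the values are integer-like
#   tz-aware (DatetimeTZDtype) -> reuse the underlying i8 values, reconcile
#       the requested tz with _maybe_infer_tz
#   tz-naive datetime64 -> _construct_from_dt64_naive (localizes if tz given)
#   anything else (integer) -> treat as epoch timestamps and view as M8[unit]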


def _construct_from_dt64_naive(
    data: np.ndarray, *, tz: tzinfo | None, copy: bool, ambiguous: TimeAmbiguous
) -> tuple[np.ndarray, bool]:
    """
    Convert datetime64 data to a supported dtype, localizing if necessary.
    """
    # Caller is responsible for ensuring
    #  lib.is_np_dtype(data.dtype)

    new_dtype = data.dtype
    if not is_supported_dtype(new_dtype):
        # Cast to the nearest supported unit, generally "s"
        new_dtype = get_supported_dtype(new_dtype)
        data = astype_overflowsafe(data, dtype=new_dtype, copy=False)
        copy = False

    if data.dtype.byteorder == ">":
        # TODO: better way to handle this?  non-copying alternative?
        #  without this, test_constructor_datetime64_bigendian fails
        data = data.astype(data.dtype.newbyteorder("<"))
        new_dtype = data.dtype
        copy = False

    if tz is not None:
        # Convert tz-naive to UTC
        # TODO: if tz is UTC, are there situations where we *don't* want a
        #  copy?  tz_localize_to_utc always makes one.
        shape = data.shape
        if data.ndim > 1:
            data = data.ravel()

        data_unit = get_unit_from_dtype(new_dtype)
        data = tzconversion.tz_localize_to_utc(
            data.view("i8"), tz, ambiguous=ambiguous, creso=data_unit
        )
        data = data.view(new_dtype)
        data = data.reshape(shape)

    assert data.dtype == new_dtype, data.dtype
    result = data

    return result, copy


def objects_to_datetime64(
    data: np.ndarray,
    dayfirst,
    yearfirst,
    utc: bool = False,
    errors: DateTimeErrorChoices = "raise",
    allow_object: bool = False,
    out_unit: str = "ns",
):
    """
    Convert data to array of timestamps.

    Parameters
    ----------
    data : np.ndarray[object]
    dayfirst : bool
    yearfirst : bool
    utc : bool, default False
        Whether to convert/localize timestamps to UTC.
    errors : {'raise', 'ignore', 'coerce'}
    allow_object : bool
        Whether to return an object-dtype ndarray instead of raising if the
        data contains more than one timezone.
    out_unit : str, default "ns"

    Returns
    -------
    result : ndarray
        np.datetime64[out_unit] if returned values represent wall times or UTC
        timestamps; object if mixed timezones.
    inferred_tz : tzinfo or None
        If not None, then the datetime64 values in `result` denote UTC timestamps.

    Raises
    ------
    ValueError : if data cannot be converted to datetimes
    TypeError : when a type cannot be converted to datetime
    """
    assert errors in ["raise", "ignore", "coerce"]

    # if str-dtype, convert
    data = np.asarray(data, dtype=np.object_)

    result, tz_parsed = tslib.array_to_datetime(
        data,
        errors=errors,
        utc=utc,
        dayfirst=dayfirst,
        yearfirst=yearfirst,
        creso=abbrev_to_npy_unit(out_unit),
    )

    if tz_parsed is not None:
        # We can take a shortcut since the datetime64 numpy array
        #  is in UTC
        return result, tz_parsed
    elif result.dtype.kind == "M":
        return result, tz_parsed
    elif result.dtype == object:
        # GH#23675 when called via `pd.to_datetime`, returning an object-dtype
        #  array is allowed.  When called via `pd.DatetimeIndex`, we can
        #  only accept datetime64 dtype, so raise TypeError if object-dtype
        #  is returned, as that indicates the values can be recognized as
        #  datetimes but they have conflicting timezones/awareness
        if allow_object:
            return result, tz_parsed
        raise TypeError("DatetimeIndex has mixed timezones")
    else:  # pragma: no cover
        # GH#23675 this TypeError should never be hit, whereas the TypeError
        #  in the object-dtype branch above is reachable.
        raise TypeError(result)
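# Behavior sketch for objects_to_datetime64 (illustrative only; exact return
# types depend on the inputs, as documented above):
#   ["2020-01-01", "2020-01-02"]       -> (M8[ns] array, None)
#   strings sharing one UTC offset     -> (M8[ns] array in UTC, tzinfo)
#   mixed offsets, allow_object=True   -> (object array of Timestamps, None)
#   mixed offsets, allow_object=False  -> TypeError("DatetimeIndex has mixed
#                                         timezones")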


def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None):
    """
    Convert data based on dtype conventions, issuing
    errors where appropriate.

    Parameters
    ----------
    data : np.ndarray or pd.Index
    copy : bool
    tz : tzinfo or None, default None

    Returns
    -------
    data : np.ndarray or pd.Index
    copy : bool

    Raises
    ------
    TypeError : PeriodDtype data is passed
    """
    if not hasattr(data, "dtype"):
        # e.g. collections.deque
        return data, copy

    if is_float_dtype(data.dtype):
        # pre-2.0 we treated these as wall-times, inconsistent with ints
        #  GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes.
        #  Note: data.astype(np.int64) fails ARM tests, see
        #  https://github.com/pandas-dev/pandas/issues/49468.
        data = data.astype(DT64NS_DTYPE).view("i8")
        copy = False

    elif lib.is_np_dtype(data.dtype, "m") or is_bool_dtype(data.dtype):
        # GH#29794 enforcing deprecation introduced in GH#23539
        raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]")
    elif isinstance(data.dtype, PeriodDtype):
        # Note: without explicitly raising here, PeriodIndex
        #  test_setops.test_join_does_not_recur fails
        raise TypeError(
            "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead"
        )

    elif isinstance(data.dtype, ExtensionDtype) and not isinstance(
        data.dtype, DatetimeTZDtype
    ):
        # TODO: We have no tests for these
        data = np.array(data, dtype=np.object_)
        copy = False

    return data, copy


# -------------------------------------------------------------------
# Validation and Inference


def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None:
    """
    If a timezone is inferred from data, check that it is compatible with
    the user-provided timezone, if any.

    Parameters
    ----------
    tz : tzinfo or None
    inferred_tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if both timezones are present but do not match
    """
    if tz is None:
        tz = inferred_tz
    elif inferred_tz is None:
        pass
    elif not timezones.tz_compare(tz, inferred_tz):
        raise TypeError(
            f"data is already tz-aware {inferred_tz}, unable to "
            f"set specified tz: {tz}"
        )
    return tz
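# Truth table for _maybe_infer_tz (descriptive comment, not executed):
#   tz=None, inferred=None        -> None
#   tz=None, inferred=UTC         -> UTC       (take the inferred zone)
#   tz=UTC,  inferred=None        -> UTC       (keep the requested zone)
#   tz=UTC,  inferred=UTC         -> UTC       (they agree)
#   tz=UTC,  inferred=US/Eastern  -> TypeError (conflicting zones)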


def _validate_dt64_dtype(dtype):
    """
    Check that a dtype, if passed, represents either a numpy datetime64[ns]
    dtype or a pandas DatetimeTZDtype.

    Parameters
    ----------
    dtype : object

    Returns
    -------
    dtype : None, numpy.dtype, or DatetimeTZDtype

    Raises
    ------
    ValueError : invalid dtype

    Notes
    -----
    Unlike _validate_tz_from_dtype, this does _not_ allow non-existent
    tz errors to go through
    """
    if dtype is not None:
        dtype = pandas_dtype(dtype)
        if dtype == np.dtype("M8"):
            # no precision, disallowed GH#24806
            msg = (
                "Passing in 'datetime64' dtype with no precision is not allowed. "
                "Please pass in 'datetime64[ns]' instead."
            )
            raise ValueError(msg)

        if (
            isinstance(dtype, np.dtype)
            and (dtype.kind != "M" or not is_supported_dtype(dtype))
        ) or not isinstance(dtype, (np.dtype, DatetimeTZDtype)):
            raise ValueError(
                f"Unexpected value for 'dtype': '{dtype}'. "
                "Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', "
                "'datetime64[ns]' or DatetimeTZDtype."
            )

        if getattr(dtype, "tz", None):
            # https://github.com/pandas-dev/pandas/issues/18595
            # Ensure that we have a standard timezone for pytz objects.
            # Without this, things like adding an array of timedeltas and
            # a tz-aware Timestamp (with a tz specific to its datetime) will
            # be incorrect(ish?) for the array as a whole
            dtype = cast(DatetimeTZDtype, dtype)
            dtype = DatetimeTZDtype(
                unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz)
            )

    return dtype
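# Examples of the validation above (descriptive comment; messages abbreviated):
#   _validate_dt64_dtype("datetime64")      -> ValueError (no precision)
#   _validate_dt64_dtype("datetime64[D]")   -> ValueError (unsupported unit)
#   _validate_dt64_dtype("datetime64[ns]")  -> np.dtype("datetime64[ns]")
#   _validate_dt64_dtype("datetime64[ns, UTC]")
#       -> DatetimeTZDtype("ns", tz=<standardized UTC>)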


def _validate_tz_from_dtype(
    dtype, tz: tzinfo | None, explicit_tz_none: bool = False
) -> tzinfo | None:
    """
    If the given dtype is a DatetimeTZDtype, extract the implied
    tzinfo object from it and check that it does not conflict with the given
    tz.

    Parameters
    ----------
    dtype : dtype, str
    tz : None, tzinfo
    explicit_tz_none : bool, default False
        Whether tz=None was passed explicitly, as opposed to lib.no_default.

    Returns
    -------
    tz : consensus tzinfo

    Raises
    ------
    ValueError : on tzinfo mismatch
    """
    if dtype is not None:
        if isinstance(dtype, str):
            try:
                dtype = DatetimeTZDtype.construct_from_string(dtype)
            except TypeError:
                # Things like `datetime64[ns]`, which is OK for the
                # constructors, but also nonsense, which should be validated
                # but not by us. We *do* allow non-existent tz errors to
                # go through
                pass
        dtz = getattr(dtype, "tz", None)
        if dtz is not None:
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError("cannot supply both a tz and a dtype with a tz")
            if explicit_tz_none:
                raise ValueError("Cannot pass both a timezone-aware dtype and tz=None")
            tz = dtz

        if tz is not None and lib.is_np_dtype(dtype, "M"):
            # We also need to check for the case where the user passed a
            # tz-naive dtype (i.e. datetime64[ns])
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError(
                    "cannot supply both a tz and a "
                    "timezone-naive dtype (i.e. datetime64[ns])"
                )

    return tz


def _infer_tz_from_endpoints(
    start: Timestamp, end: Timestamp, tz: tzinfo | None
) -> tzinfo | None:
    """
    If a timezone is not explicitly given via `tz`, see if one can
    be inferred from the `start` and `end` endpoints.  If more than one
    of these inputs provides a timezone, require that they all agree.

    Parameters
    ----------
    start : Timestamp
    end : Timestamp
    tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if start and end timezones do not agree
    """
    try:
        inferred_tz = timezones.infer_tzinfo(start, end)
    except AssertionError as err:
        # infer_tzinfo raises AssertionError if passed mismatched timezones
        raise TypeError(
            "Start and end cannot both be tz-aware with different timezones"
        ) from err

    inferred_tz = timezones.maybe_get_tz(inferred_tz)
    tz = timezones.maybe_get_tz(tz)

    if tz is not None and inferred_tz is not None:
        if not timezones.tz_compare(inferred_tz, tz):
            raise AssertionError("Inferred time zone not equal to passed time zone")

    elif inferred_tz is not None:
        tz = inferred_tz

    return tz


def _maybe_normalize_endpoints(
    start: Timestamp | None, end: Timestamp | None, normalize: bool
):
    if normalize:
        if start is not None:
            start = start.normalize()

        if end is not None:
            end = end.normalize()

    return start, end


def _maybe_localize_point(
    ts: Timestamp | None, freq, tz, ambiguous, nonexistent
) -> Timestamp | None:
    """
    Localize a start or end Timestamp to the timezone of the corresponding
    start or end Timestamp

    Parameters
    ----------
    ts : start or end Timestamp to potentially localize
    freq : Tick, DateOffset, or None
    tz : str, timezone object or None
    ambiguous : str, localization behavior for ambiguous times
    nonexistent : str, localization behavior for nonexistent times

    Returns
    -------
    ts : Timestamp
    """
    # Make sure start and end are timezone localized if:
    # 1) freq = a Timedelta-like frequency (Tick)
    # 2) freq = None i.e. generating a linspaced range
    if ts is not None and ts.tzinfo is None:
        # Note: We can't ambiguous='infer' a singular ambiguous time; however,
        # we have historically defaulted ambiguous=False
        ambiguous = ambiguous if ambiguous != "infer" else False
        localize_args = {"ambiguous": ambiguous, "nonexistent": nonexistent, "tz": None}
        if isinstance(freq, Tick) or freq is None:
            localize_args["tz"] = tz
        ts = ts.tz_localize(**localize_args)
    return ts


def _generate_range(
    start: Timestamp | None,
    end: Timestamp | None,
    periods: int | None,
    offset: BaseOffset,
    *,
    unit: str,
):
    """
    Generates a sequence of dates corresponding to the specified time
    offset. Similar to dateutil.rrule except uses pandas DateOffset
    objects to represent time increments.

    Parameters
    ----------
    start : Timestamp or None
    end : Timestamp or None
    periods : int or None
    offset : DateOffset
    unit : str

    Notes
    -----
    * This method is faster for generating weekdays than dateutil.rrule
    * At least two of (start, end, periods) must be specified.
    * If both start and end are specified, the returned dates will
      satisfy start <= date <= end.

    Returns
    -------
    dates : generator object
    """
    offset = to_offset(offset)

    # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
    # expected "Union[integer[Any], float, str, date, datetime64]"
    start = Timestamp(start)  # type: ignore[arg-type]
    if start is not NaT:
        start = start.as_unit(unit)
    else:
        start = None

    # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
    # expected "Union[integer[Any], float, str, date, datetime64]"
    end = Timestamp(end)  # type: ignore[arg-type]
    if end is not NaT:
        end = end.as_unit(unit)
    else:
        end = None

    if start and not offset.is_on_offset(start):
        # Incompatible types in assignment (expression has type "datetime",
        # variable has type "Optional[Timestamp]")
        start = offset.rollforward(start)  # type: ignore[assignment]

    elif end and not offset.is_on_offset(end):
        # Incompatible types in assignment (expression has type "datetime",
        # variable has type "Optional[Timestamp]")
        end = offset.rollback(end)  # type: ignore[assignment]

    # Unsupported operand types for < ("Timestamp" and "None")
    if periods is None and end < start and offset.n >= 0:  # type: ignore[operator]
        end = None
        periods = 0

    if end is None:
        # error: No overload variant of "__radd__" of "BaseOffset" matches
        # argument type "None"
        end = start + (periods - 1) * offset  # type: ignore[operator]

    if start is None:
        # error: No overload variant of "__radd__" of "BaseOffset" matches
        # argument type "None"
        start = end - (periods - 1) * offset  # type: ignore[operator]

    start = cast(Timestamp, start)
    end = cast(Timestamp, end)

    cur = start
    if offset.n >= 0:
        while cur <= end:
            yield cur

            if cur == end:
                # GH#24252 avoid overflows by not performing the addition
                # in offset.apply unless we have to
                break

            # faster than cur + offset
            next_date = offset._apply(cur)
            next_date = next_date.as_unit(unit)
            if next_date <= cur:
                raise ValueError(f"Offset {offset} did not increment date")
            cur = next_date
    else:
        while cur >= end:
            yield cur

            if cur == end:
                # GH#24252 avoid overflows by not performing the addition
                # in offset.apply unless we have to
                break

            # faster than cur + offset
            next_date = offset._apply(cur)
            next_date = next_date.as_unit(unit)
            if next_date >= cur:
                raise ValueError(f"Offset {offset} did not decrement date")
            cur = next_date
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/floating.py
ADDED
@@ -0,0 +1,173 @@
from __future__ import annotations

from typing import ClassVar

import numpy as np

from pandas.core.dtypes.base import register_extension_dtype
from pandas.core.dtypes.common import is_float_dtype

from pandas.core.arrays.numeric import (
    NumericArray,
    NumericDtype,
)


class FloatingDtype(NumericDtype):
    """
    An ExtensionDtype to hold a single size of floating dtype.

    These specific implementations are subclasses of the non-public
    FloatingDtype. For example we have Float32Dtype to represent float32.

    The attributes name & type are set when these subclasses are created.
    """

    _default_np_dtype = np.dtype(np.float64)
    _checker = is_float_dtype

    @classmethod
    def construct_array_type(cls) -> type[FloatingArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return FloatingArray

    @classmethod
    def _get_dtype_mapping(cls) -> dict[np.dtype, FloatingDtype]:
        return NUMPY_FLOAT_TO_DTYPE

    @classmethod
    def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
        """
        Safely cast the values to the given dtype.

        "safe" in this context means the casting is lossless.
        """
        # This is really only here for compatibility with IntegerDtype
        return values.astype(dtype, copy=copy)


class FloatingArray(NumericArray):
    """
    Array of floating (optional missing) values.

    .. warning::

       FloatingArray is currently experimental, and its API or internal
       implementation may change without warning. Especially the behaviour
       regarding NaN (distinct from NA missing values) is subject to change.

    We represent a FloatingArray with 2 numpy arrays:

    - data: contains a numpy float array of the appropriate dtype
    - mask: a boolean array holding a mask on the data, True is missing

    To construct a FloatingArray from generic array-like input, use
    :func:`pandas.array` with one of the float dtypes (see examples).

    See :ref:`integer_na` for more.

    Parameters
    ----------
    values : numpy.ndarray
        A 1-d float-dtype array.
    mask : numpy.ndarray
        A 1-d boolean-dtype array indicating missing values.
    copy : bool, default False
        Whether to copy the `values` and `mask`.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Returns
    -------
    FloatingArray

    Examples
    --------
    Create a FloatingArray with :func:`pandas.array`:

    >>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype())
    <FloatingArray>
    [0.1, <NA>, 0.3]
    Length: 3, dtype: Float32

    String aliases for the dtypes are also available. They are capitalized.

    >>> pd.array([0.1, None, 0.3], dtype="Float32")
    <FloatingArray>
    [0.1, <NA>, 0.3]
    Length: 3, dtype: Float32
    """

    _dtype_cls = FloatingDtype

    # The value used to fill '_data' to avoid upcasting
    _internal_fill_value = np.nan
    # Fill values used for any/all
    # Incompatible types in assignment (expression has type "float", base class
    # "BaseMaskedArray" defined the type as "<typing special form>")
    _truthy_value = 1.0  # type: ignore[assignment]
    _falsey_value = 0.0  # type: ignore[assignment]


_dtype_docstring = """
An ExtensionDtype for {dtype} data.

This dtype uses ``pd.NA`` as missing value indicator.

Attributes
----------
None

Methods
-------
None

Examples
--------
For Float32Dtype:

>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float32Dtype())
>>> ser.dtype
Float32Dtype()

For Float64Dtype:

>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float64Dtype())
>>> ser.dtype
Float64Dtype()
"""

# create the Dtype


@register_extension_dtype
class Float32Dtype(FloatingDtype):
    type = np.float32
    name: ClassVar[str] = "Float32"
    __doc__ = _dtype_docstring.format(dtype="float32")


@register_extension_dtype
class Float64Dtype(FloatingDtype):
    type = np.float64
    name: ClassVar[str] = "Float64"
    __doc__ = _dtype_docstring.format(dtype="float64")


NUMPY_FLOAT_TO_DTYPE: dict[np.dtype, FloatingDtype] = {
    np.dtype(np.float32): Float32Dtype(),
    np.dtype(np.float64): Float64Dtype(),
}
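# The _safe_cast contrast between the two masked numeric dtypes is worth
# noting: FloatingDtype above casts unconditionally, while IntegerDtype in
# integer.py below must verify the cast is lossless. A short sketch via the
# public constructor (assuming a standard pandas install):
#
#     import pandas as pd
#
#     pd.array([1.0, 2.0], dtype="Int64")   # fine: values are integral
#     pd.array([1.5], dtype="Int64")        # TypeError: cannot safely cast
#                                           # non-equivalent float64 to int64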
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/integer.py
ADDED
@@ -0,0 +1,272 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import ClassVar
|
4 |
+
|
5 |
+
import numpy as np
|
6 |
+
|
7 |
+
from pandas.core.dtypes.base import register_extension_dtype
|
8 |
+
from pandas.core.dtypes.common import is_integer_dtype
|
9 |
+
|
10 |
+
from pandas.core.arrays.numeric import (
|
11 |
+
NumericArray,
|
12 |
+
NumericDtype,
|
13 |
+
)
|
14 |
+
|
15 |
+
|
16 |
+
class IntegerDtype(NumericDtype):
|
17 |
+
"""
|
18 |
+
An ExtensionDtype to hold a single size & kind of integer dtype.
|
19 |
+
|
20 |
+
These specific implementations are subclasses of the non-public
|
21 |
+
IntegerDtype. For example, we have Int8Dtype to represent signed int 8s.
|
22 |
+
|
23 |
+
The attributes name & type are set when these subclasses are created.
|
24 |
+
"""
|
25 |
+
|
26 |
+
_default_np_dtype = np.dtype(np.int64)
|
27 |
+
_checker = is_integer_dtype
|
28 |
+
|
29 |
+
@classmethod
|
30 |
+
def construct_array_type(cls) -> type[IntegerArray]:
|
31 |
+
"""
|
32 |
+
Return the array type associated with this dtype.
|
33 |
+
|
34 |
+
Returns
|
35 |
+
-------
|
36 |
+
type
|
37 |
+
"""
|
38 |
+
return IntegerArray
|
39 |
+
|
40 |
+
@classmethod
|
41 |
+
def _get_dtype_mapping(cls) -> dict[np.dtype, IntegerDtype]:
|
42 |
+
return NUMPY_INT_TO_DTYPE
|
43 |
+
|
44 |
+
@classmethod
|
45 |
+
def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
|
46 |
+
"""
|
47 |
+
Safely cast the values to the given dtype.
|
48 |
+
|
49 |
+
"safe" in this context means the casting is lossless. e.g. if 'values'
|
50 |
+
has a floating dtype, each value must be an integer.
|
51 |
+
"""
|
52 |
+
try:
|
53 |
+
return values.astype(dtype, casting="safe", copy=copy)
|
54 |
+
except TypeError as err:
|
55 |
+
casted = values.astype(dtype, copy=copy)
|
56 |
+
if (casted == values).all():
|
57 |
+
return casted
|
58 |
+
|
59 |
+
raise TypeError(
|
60 |
+
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
|
61 |
+
) from err
|
62 |
+
|
63 |
+
|
64 |
+
class IntegerArray(NumericArray):
|
65 |
+
"""
|
66 |
+
Array of integer (optional missing) values.
|
67 |
+
|
68 |
+
Uses :attr:`pandas.NA` as the missing value.
|
69 |
+
|
70 |
+
.. warning::
|
71 |
+
|
72 |
+
IntegerArray is currently experimental, and its API or internal
|
73 |
+
implementation may change without warning.
|
74 |
+
|
75 |
+
We represent an IntegerArray with 2 numpy arrays:
|
76 |
+
|
77 |
+
- data: contains a numpy integer array of the appropriate dtype
|
78 |
+
- mask: a boolean array holding a mask on the data, True is missing
|
79 |
+
|
80 |
+
To construct an IntegerArray from generic array-like input, use
|
81 |
+
:func:`pandas.array` with one of the integer dtypes (see examples).
|
82 |
+
|
83 |
+
See :ref:`integer_na` for more.
|
84 |
+
|
85 |
+
Parameters
|
86 |
+
----------
|
87 |
+
values : numpy.ndarray
|
88 |
+
A 1-d integer-dtype array.
|
89 |
+
mask : numpy.ndarray
|
90 |
+
A 1-d boolean-dtype array indicating missing values.
|
91 |
+
copy : bool, default False
|
92 |
+
Whether to copy the `values` and `mask`.
|
93 |
+
|
94 |
+
Attributes
|
95 |
+
----------
|
96 |
+
None
|
97 |
+
|
98 |
+
Methods
|
99 |
+
-------
|
100 |
+
None
|
101 |
+
|
102 |
+
Returns
|
103 |
+
-------
|
104 |
+
IntegerArray
|
105 |
+
|
106 |
+
Examples
|
107 |
+
--------
|
108 |
+
Create an IntegerArray with :func:`pandas.array`.
|
109 |
+
|
110 |
+
>>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
|
111 |
+
>>> int_array
|
112 |
+
<IntegerArray>
|
113 |
+
[1, <NA>, 3]
|
114 |
+
Length: 3, dtype: Int32
|
115 |
+
|
116 |
+
String aliases for the dtypes are also available. They are capitalized.
|
117 |
+
|
118 |
+
>>> pd.array([1, None, 3], dtype='Int32')
|
119 |
+
<IntegerArray>
|
120 |
+
[1, <NA>, 3]
|
121 |
+
Length: 3, dtype: Int32
|
122 |
+
|
123 |
+
>>> pd.array([1, None, 3], dtype='UInt16')
|
124 |
+
<IntegerArray>
|
125 |
+
[1, <NA>, 3]
|
126 |
+
Length: 3, dtype: UInt16
|
127 |
+
"""
|
128 |
+
|
129 |
+
_dtype_cls = IntegerDtype
|
130 |
+
|
131 |
+
# The value used to fill '_data' to avoid upcasting
|
132 |
+
_internal_fill_value = 1
|
133 |
+
# Fill values used for any/all
|
134 |
+
# Incompatible types in assignment (expression has type "int", base class
|
135 |
+
# "BaseMaskedArray" defined the type as "<typing special form>")
|
136 |
+
_truthy_value = 1 # type: ignore[assignment]
|
137 |
+
_falsey_value = 0 # type: ignore[assignment]


_dtype_docstring = """
An ExtensionDtype for {dtype} integer data.

Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.

Attributes
----------
None

Methods
-------
None

Examples
--------
For Int8Dtype:

>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
>>> ser.dtype
Int8Dtype()

For Int16Dtype:

>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
>>> ser.dtype
Int16Dtype()

For Int32Dtype:

>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
>>> ser.dtype
Int32Dtype()

For Int64Dtype:

>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
>>> ser.dtype
Int64Dtype()

For UInt8Dtype:

>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
>>> ser.dtype
UInt8Dtype()

For UInt16Dtype:

>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
>>> ser.dtype
UInt16Dtype()

For UInt32Dtype:

>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
>>> ser.dtype
UInt32Dtype()

For UInt64Dtype:

>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
>>> ser.dtype
UInt64Dtype()
"""

# create the Dtype


@register_extension_dtype
class Int8Dtype(IntegerDtype):
    type = np.int8
    name: ClassVar[str] = "Int8"
    __doc__ = _dtype_docstring.format(dtype="int8")


@register_extension_dtype
class Int16Dtype(IntegerDtype):
    type = np.int16
    name: ClassVar[str] = "Int16"
    __doc__ = _dtype_docstring.format(dtype="int16")


@register_extension_dtype
class Int32Dtype(IntegerDtype):
    type = np.int32
    name: ClassVar[str] = "Int32"
    __doc__ = _dtype_docstring.format(dtype="int32")


@register_extension_dtype
class Int64Dtype(IntegerDtype):
    type = np.int64
    name: ClassVar[str] = "Int64"
    __doc__ = _dtype_docstring.format(dtype="int64")


@register_extension_dtype
class UInt8Dtype(IntegerDtype):
    type = np.uint8
    name: ClassVar[str] = "UInt8"
    __doc__ = _dtype_docstring.format(dtype="uint8")


@register_extension_dtype
class UInt16Dtype(IntegerDtype):
    type = np.uint16
    name: ClassVar[str] = "UInt16"
    __doc__ = _dtype_docstring.format(dtype="uint16")


@register_extension_dtype
class UInt32Dtype(IntegerDtype):
    type = np.uint32
    name: ClassVar[str] = "UInt32"
    __doc__ = _dtype_docstring.format(dtype="uint32")


@register_extension_dtype
class UInt64Dtype(IntegerDtype):
    type = np.uint64
    name: ClassVar[str] = "UInt64"
    __doc__ = _dtype_docstring.format(dtype="uint64")
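
# Registration sketch: because of @register_extension_dtype, the capitalized
# string aliases resolve to these dtype singletons (assumes ``import pandas
# as pd``):
#
# >>> pd.api.types.pandas_dtype("Int8")
# Int8Dtype()
# >>> pd.Series([1, None], dtype="UInt16").dtype
# UInt16Dtype()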


NUMPY_INT_TO_DTYPE: dict[np.dtype, IntegerDtype] = {
    np.dtype(np.int8): Int8Dtype(),
    np.dtype(np.int16): Int16Dtype(),
    np.dtype(np.int32): Int32Dtype(),
    np.dtype(np.int64): Int64Dtype(),
    np.dtype(np.uint8): UInt8Dtype(),
    np.dtype(np.uint16): UInt16Dtype(),
    np.dtype(np.uint32): UInt32Dtype(),
    np.dtype(np.uint64): UInt64Dtype(),
}
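
# Lookup sketch for the mapping above. The import path is pandas-internal and
# may change between versions; treat it as an assumption:
#
# >>> import numpy as np
# >>> from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE
# >>> NUMPY_INT_TO_DTYPE[np.dtype(np.int16)]
# Int16Dtype()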
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/interval.py
ADDED
@@ -0,0 +1,1917 @@
from __future__ import annotations

import operator
from operator import (
    le,
    lt,
)
import textwrap
from typing import (
    TYPE_CHECKING,
    Literal,
    Union,
    overload,
)
import warnings

import numpy as np

from pandas._libs import lib
from pandas._libs.interval import (
    VALID_CLOSED,
    Interval,
    IntervalMixin,
    intervals_to_interval_bounds,
)
from pandas._libs.missing import NA
from pandas._typing import (
    ArrayLike,
    AxisInt,
    Dtype,
    FillnaOptions,
    IntervalClosedType,
    NpDtype,
    PositionalIndexer,
    ScalarIndexer,
    Self,
    SequenceIndexer,
    SortKind,
    TimeArrayLike,
    npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import IntCastingNaNError
from pandas.util._decorators import Appender

from pandas.core.dtypes.cast import (
    LossySetitemError,
    maybe_upcast_numeric_to_64bit,
)
from pandas.core.dtypes.common import (
    is_float_dtype,
    is_integer_dtype,
    is_list_like,
    is_object_dtype,
    is_scalar,
    is_string_dtype,
    needs_i8_conversion,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
    CategoricalDtype,
    IntervalDtype,
)
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCDatetimeIndex,
    ABCIntervalIndex,
    ABCPeriodIndex,
)
from pandas.core.dtypes.missing import (
    is_valid_na_for_dtype,
    isna,
    notna,
)

from pandas.core.algorithms import (
    isin,
    take,
    unique,
    value_counts_internal as value_counts,
)
from pandas.core.arrays import ArrowExtensionArray
from pandas.core.arrays.base import (
    ExtensionArray,
    _extension_array_shared_docs,
)
from pandas.core.arrays.datetimes import DatetimeArray
from pandas.core.arrays.timedeltas import TimedeltaArray
import pandas.core.common as com
from pandas.core.construction import (
    array as pd_array,
    ensure_wrapped_if_datetimelike,
    extract_array,
)
from pandas.core.indexers import check_array_indexer
from pandas.core.ops import (
    invalid_comparison,
    unpack_zerodim_and_defer,
)

if TYPE_CHECKING:
    from collections.abc import (
        Iterator,
        Sequence,
    )

    from pandas import (
        Index,
        Series,
    )


IntervalSide = Union[TimeArrayLike, np.ndarray]
IntervalOrNA = Union[Interval, float]

_interval_shared_docs: dict[str, str] = {}

_shared_docs_kwargs = {
    "klass": "IntervalArray",
    "qualname": "arrays.IntervalArray",
    "name": "",
}


_interval_shared_docs[
    "class"
] = """
%(summary)s

Parameters
----------
data : array-like (1-dimensional)
    Array-like (ndarray, :class:`DatetimeArray`, :class:`TimedeltaArray`) containing
    Interval objects from which to build the %(klass)s.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
    Whether the intervals are closed on the left-side, right-side, both or
    neither.
dtype : dtype or None, default None
    If None, dtype will be inferred.
copy : bool, default False
    Copy the input data.
%(name)s\
verify_integrity : bool, default True
    Verify that the %(klass)s is valid.

Attributes
----------
left
right
closed
mid
length
is_empty
is_non_overlapping_monotonic
%(extra_attributes)s\

Methods
-------
from_arrays
from_tuples
from_breaks
contains
overlaps
set_closed
to_tuples
%(extra_methods)s\

See Also
--------
Index : The base pandas Index type.
Interval : A bounded slice-like interval; the elements of an %(klass)s.
interval_range : Function to create a fixed frequency IntervalIndex.
cut : Bin values into discrete Intervals.
qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.

Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`__
for more.

%(examples)s\
"""


@Appender(
    _interval_shared_docs["class"]
    % {
        "klass": "IntervalArray",
        "summary": "Pandas array for interval data that are closed on the same side.",
        "name": "",
        "extra_attributes": "",
        "extra_methods": "",
        "examples": textwrap.dedent(
            """\
    Examples
    --------
    A new ``IntervalArray`` can be constructed directly from an array-like of
    ``Interval`` objects:

    >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
    <IntervalArray>
    [(0, 1], (1, 5]]
    Length: 2, dtype: interval[int64, right]

    It may also be constructed using one of the constructor
    methods: :meth:`IntervalArray.from_arrays`,
    :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
    """
        ),
    }
)
class IntervalArray(IntervalMixin, ExtensionArray):
    can_hold_na = True
    _na_value = _fill_value = np.nan

    @property
    def ndim(self) -> Literal[1]:
        return 1

    # To make mypy recognize the fields
    _left: IntervalSide
    _right: IntervalSide
    _dtype: IntervalDtype

    # ---------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        data,
        closed: IntervalClosedType | None = None,
        dtype: Dtype | None = None,
        copy: bool = False,
        verify_integrity: bool = True,
    ):
        data = extract_array(data, extract_numpy=True)

        if isinstance(data, cls):
            left: IntervalSide = data._left
            right: IntervalSide = data._right
            closed = closed or data.closed
            dtype = IntervalDtype(left.dtype, closed=closed)
        else:
            # don't allow scalars
            if is_scalar(data):
                msg = (
                    f"{cls.__name__}(...) must be called with a collection "
                    f"of some kind, {data} was passed"
                )
                raise TypeError(msg)

            # might need to convert empty or purely na data
            data = _maybe_convert_platform_interval(data)
            left, right, infer_closed = intervals_to_interval_bounds(
                data, validate_closed=closed is None
            )
            if left.dtype == object:
                left = lib.maybe_convert_objects(left)
                right = lib.maybe_convert_objects(right)
            closed = closed or infer_closed

            left, right, dtype = cls._ensure_simple_new_inputs(
                left,
                right,
                closed=closed,
                copy=copy,
                dtype=dtype,
            )

        if verify_integrity:
            cls._validate(left, right, dtype=dtype)

        return cls._simple_new(
            left,
            right,
            dtype=dtype,
        )
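
    # Constructor sketch matching the branches above (assumes ``import pandas
    # as pd``): scalars are rejected, Interval objects are decomposed into
    # left/right bounds, and ``closed`` is inferred when not supplied.
    #
    # >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]).closed
    # 'right'
    # >>> pd.arrays.IntervalArray(pd.Interval(0, 1))
    # Traceback (most recent call last):
    #     ...
    # TypeError: IntervalArray(...) must be called with a collection of some kind, (0, 1] was passed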

    @classmethod
    def _simple_new(
        cls,
        left: IntervalSide,
        right: IntervalSide,
        dtype: IntervalDtype,
    ) -> Self:
        result = IntervalMixin.__new__(cls)
        result._left = left
        result._right = right
        result._dtype = dtype

        return result

    @classmethod
    def _ensure_simple_new_inputs(
        cls,
        left,
        right,
        closed: IntervalClosedType | None = None,
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> tuple[IntervalSide, IntervalSide, IntervalDtype]:
        """Ensure correctness of input parameters for cls._simple_new."""
        from pandas.core.indexes.base import ensure_index

        left = ensure_index(left, copy=copy)
        left = maybe_upcast_numeric_to_64bit(left)

        right = ensure_index(right, copy=copy)
        right = maybe_upcast_numeric_to_64bit(right)

        if closed is None and isinstance(dtype, IntervalDtype):
            closed = dtype.closed

        closed = closed or "right"

        if dtype is not None:
            # GH 19262: dtype must be an IntervalDtype to override inferred
            dtype = pandas_dtype(dtype)
            if isinstance(dtype, IntervalDtype):
                if dtype.subtype is not None:
                    left = left.astype(dtype.subtype)
                    right = right.astype(dtype.subtype)
            else:
                msg = f"dtype must be an IntervalDtype, got {dtype}"
                raise TypeError(msg)

            if dtype.closed is None:
                # possibly loading an old pickle
                dtype = IntervalDtype(dtype.subtype, closed)
            elif closed != dtype.closed:
                raise ValueError("closed keyword does not match dtype.closed")

        # coerce dtypes to match if needed
        if is_float_dtype(left.dtype) and is_integer_dtype(right.dtype):
            right = right.astype(left.dtype)
        elif is_float_dtype(right.dtype) and is_integer_dtype(left.dtype):
            left = left.astype(right.dtype)

        if type(left) != type(right):
            msg = (
                f"must not have differing left [{type(left).__name__}] and "
                f"right [{type(right).__name__}] types"
            )
            raise ValueError(msg)
        if isinstance(left.dtype, CategoricalDtype) or is_string_dtype(left.dtype):
            # GH 19016
            msg = (
                "category, object, and string subtypes are not supported "
                "for IntervalArray"
            )
            raise TypeError(msg)
        if isinstance(left, ABCPeriodIndex):
            msg = "Period dtypes are not supported, use a PeriodIndex instead"
            raise ValueError(msg)
        if isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):
            msg = (
                "left and right must have the same time zone, got "
                f"'{left.tz}' and '{right.tz}'"
            )
            raise ValueError(msg)
        elif needs_i8_conversion(left.dtype) and left.unit != right.unit:
            # e.g. m8[s] vs m8[ms], try to cast to a common dtype GH#55714
            left_arr, right_arr = left._data._ensure_matching_resos(right._data)
            left = ensure_index(left_arr)
            right = ensure_index(right_arr)

        # For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray
        left = ensure_wrapped_if_datetimelike(left)
        left = extract_array(left, extract_numpy=True)
        right = ensure_wrapped_if_datetimelike(right)
        right = extract_array(right, extract_numpy=True)

        if isinstance(left, ArrowExtensionArray) or isinstance(
            right, ArrowExtensionArray
        ):
            pass
        else:
            lbase = getattr(left, "_ndarray", left)
            lbase = getattr(lbase, "_data", lbase).base
            rbase = getattr(right, "_ndarray", right)
            rbase = getattr(rbase, "_data", rbase).base
            if lbase is not None and lbase is rbase:
                # If these share data, then setitem could corrupt our IA
                right = right.copy()

        dtype = IntervalDtype(left.dtype, closed=closed)

        return left, right, dtype
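
    # Coercion sketch for the "coerce dtypes to match" branch above (assumes
    # ``import pandas as pd``): mixed int/float bounds land on the float side.
    #
    # >>> pd.arrays.IntervalArray.from_arrays([0, 1], [1.5, 2.5]).dtype
    # interval[float64, right]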

    @classmethod
    def _from_sequence(
        cls,
        scalars,
        *,
        dtype: Dtype | None = None,
        copy: bool = False,
    ) -> Self:
        return cls(scalars, dtype=dtype, copy=copy)

    @classmethod
    def _from_factorized(cls, values: np.ndarray, original: IntervalArray) -> Self:
        return cls._from_sequence(values, dtype=original.dtype)

    _interval_shared_docs["from_breaks"] = textwrap.dedent(
        """
        Construct an %(klass)s from an array of splits.

        Parameters
        ----------
        breaks : array-like (1-dimensional)
            Left and right bounds for each interval.
        closed : {'left', 'right', 'both', 'neither'}, default 'right'
            Whether the intervals are closed on the left-side, right-side, both
            or neither.\
        %(name)s
        copy : bool, default False
            Copy the data.
        dtype : dtype or None, default None
            If None, dtype will be inferred.

        Returns
        -------
        %(klass)s

        See Also
        --------
        interval_range : Function to create a fixed frequency IntervalIndex.
        %(klass)s.from_arrays : Construct from a left and right array.
        %(klass)s.from_tuples : Construct from a sequence of tuples.

        %(examples)s\
        """
    )

    @classmethod
    @Appender(
        _interval_shared_docs["from_breaks"]
        % {
            "klass": "IntervalArray",
            "name": "",
            "examples": textwrap.dedent(
                """\
        Examples
        --------
        >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
        <IntervalArray>
        [(0, 1], (1, 2], (2, 3]]
        Length: 3, dtype: interval[int64, right]
        """
            ),
        }
    )
    def from_breaks(
        cls,
        breaks,
        closed: IntervalClosedType | None = "right",
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> Self:
        breaks = _maybe_convert_platform_interval(breaks)

        return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
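
    # Equivalence sketch (assumes ``import pandas as pd``): from_breaks is
    # exactly the pairwise slicing shown above.
    #
    # >>> breaks = [0, 10, 20, 30]
    # >>> a = pd.arrays.IntervalArray.from_breaks(breaks, closed="left")
    # >>> b = pd.arrays.IntervalArray.from_arrays(breaks[:-1], breaks[1:], closed="left")
    # >>> a.equals(b)
    # True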

    _interval_shared_docs["from_arrays"] = textwrap.dedent(
        """
        Construct from two arrays defining the left and right bounds.

        Parameters
        ----------
        left : array-like (1-dimensional)
            Left bounds for each interval.
        right : array-like (1-dimensional)
            Right bounds for each interval.
        closed : {'left', 'right', 'both', 'neither'}, default 'right'
            Whether the intervals are closed on the left-side, right-side, both
            or neither.\
        %(name)s
        copy : bool, default False
            Copy the data.
        dtype : dtype, optional
            If None, dtype will be inferred.

        Returns
        -------
        %(klass)s

        Raises
        ------
        ValueError
            When a value is missing in only one of `left` or `right`.
            When a value in `left` is greater than the corresponding value
            in `right`.

        See Also
        --------
        interval_range : Function to create a fixed frequency IntervalIndex.
        %(klass)s.from_breaks : Construct an %(klass)s from an array of
            splits.
        %(klass)s.from_tuples : Construct an %(klass)s from an
            array-like of tuples.

        Notes
        -----
        Each element of `left` must be less than or equal to the `right`
        element at the same position. If an element is missing, it must be
        missing in both `left` and `right`. A TypeError is raised when
        using an unsupported type for `left` or `right`. At the moment,
        'category', 'object', and 'string' subtypes are not supported.

        %(examples)s\
        """
    )

    @classmethod
    @Appender(
        _interval_shared_docs["from_arrays"]
        % {
            "klass": "IntervalArray",
            "name": "",
            "examples": textwrap.dedent(
                """\
        Examples
        --------
        >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
        <IntervalArray>
        [(0, 1], (1, 2], (2, 3]]
        Length: 3, dtype: interval[int64, right]
        """
            ),
        }
    )
    def from_arrays(
        cls,
        left,
        right,
        closed: IntervalClosedType | None = "right",
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> Self:
        left = _maybe_convert_platform_interval(left)
        right = _maybe_convert_platform_interval(right)

        left, right, dtype = cls._ensure_simple_new_inputs(
            left,
            right,
            closed=closed,
            copy=copy,
            dtype=dtype,
        )
        cls._validate(left, right, dtype=dtype)

        return cls._simple_new(left, right, dtype=dtype)

    _interval_shared_docs["from_tuples"] = textwrap.dedent(
        """
        Construct an %(klass)s from an array-like of tuples.

        Parameters
        ----------
        data : array-like (1-dimensional)
            Array of tuples.
        closed : {'left', 'right', 'both', 'neither'}, default 'right'
            Whether the intervals are closed on the left-side, right-side, both
            or neither.\
        %(name)s
        copy : bool, default False
            By default, copy the data; this keyword exists for compat only
            and is ignored.
        dtype : dtype or None, default None
            If None, dtype will be inferred.

        Returns
        -------
        %(klass)s

        See Also
        --------
        interval_range : Function to create a fixed frequency IntervalIndex.
        %(klass)s.from_arrays : Construct an %(klass)s from a left and
            right array.
        %(klass)s.from_breaks : Construct an %(klass)s from an array of
            splits.

        %(examples)s\
        """
    )

    @classmethod
    @Appender(
        _interval_shared_docs["from_tuples"]
        % {
            "klass": "IntervalArray",
            "name": "",
            "examples": textwrap.dedent(
                """\
        Examples
        --------
        >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
        <IntervalArray>
        [(0, 1], (1, 2]]
        Length: 2, dtype: interval[int64, right]
        """
            ),
        }
    )
    def from_tuples(
        cls,
        data,
        closed: IntervalClosedType | None = "right",
        copy: bool = False,
        dtype: Dtype | None = None,
    ) -> Self:
        if len(data):
            left, right = [], []
        else:
            # ensure that empty data keeps input dtype
            left = right = data

        for d in data:
            if not isinstance(d, tuple) and isna(d):
                lhs = rhs = np.nan
            else:
                name = cls.__name__
                try:
                    # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
                    lhs, rhs = d
                except ValueError as err:
                    msg = f"{name}.from_tuples requires tuples of length 2, got {d}"
                    raise ValueError(msg) from err
                except TypeError as err:
                    msg = f"{name}.from_tuples received an invalid item, {d}"
                    raise TypeError(msg) from err
            left.append(lhs)
            right.append(rhs)

        return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)
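
    # NA-handling sketch for the ``isna(d)`` branch above (assumes ``import
    # pandas as pd`` and ``import numpy as np``): non-tuple NA entries become
    # an all-NaN interval slot.
    #
    # >>> arr = pd.arrays.IntervalArray.from_tuples([(0, 1), np.nan, (2, 3)])
    # >>> arr.isna()
    # array([False,  True, False])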

    @classmethod
    def _validate(cls, left, right, dtype: IntervalDtype) -> None:
        """
        Verify that the IntervalArray is valid.

        Checks that

        * dtype is correct
        * left and right match lengths
        * left and right have the same missing values
        * left is always below right
        """
        if not isinstance(dtype, IntervalDtype):
            msg = f"invalid dtype: {dtype}"
            raise ValueError(msg)
        if len(left) != len(right):
            msg = "left and right must have the same length"
            raise ValueError(msg)
        left_mask = notna(left)
        right_mask = notna(right)
        if not (left_mask == right_mask).all():
            msg = (
                "missing values must be missing in the same "
                "location both left and right sides"
            )
            raise ValueError(msg)
        if not (left[left_mask] <= right[left_mask]).all():
            msg = "left side of interval must be <= right side"
            raise ValueError(msg)

    def _shallow_copy(self, left, right) -> Self:
        """
        Return a new IntervalArray with the replacement attributes

        Parameters
        ----------
        left : Index
            Values to be used for the left-side of the intervals.
        right : Index
            Values to be used for the right-side of the intervals.
        """
        dtype = IntervalDtype(left.dtype, closed=self.closed)
        left, right, dtype = self._ensure_simple_new_inputs(left, right, dtype=dtype)

        return self._simple_new(left, right, dtype=dtype)

    # ---------------------------------------------------------------------
    # Descriptive

    @property
    def dtype(self) -> IntervalDtype:
        return self._dtype

    @property
    def nbytes(self) -> int:
        return self.left.nbytes + self.right.nbytes

    @property
    def size(self) -> int:
        # Avoid materializing self.values
        return self.left.size

    # ---------------------------------------------------------------------
    # EA Interface

    def __iter__(self) -> Iterator:
        return iter(np.asarray(self))

    def __len__(self) -> int:
        return len(self._left)

    @overload
    def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA:
        ...

    @overload
    def __getitem__(self, key: SequenceIndexer) -> Self:
        ...

    def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA:
        key = check_array_indexer(self, key)
        left = self._left[key]
        right = self._right[key]

        if not isinstance(left, (np.ndarray, ExtensionArray)):
            # scalar
            if is_scalar(left) and isna(left):
                return self._fill_value
            return Interval(left, right, self.closed)
        if np.ndim(left) > 1:
            # GH#30588 multi-dimensional indexer disallowed
            raise ValueError("multi-dimensional indexing not allowed")
        # Argument 2 to "_simple_new" of "IntervalArray" has incompatible type
        # "Union[Period, Timestamp, Timedelta, NaTType, DatetimeArray, TimedeltaArray,
        # ndarray[Any, Any]]"; expected "Union[Union[DatetimeArray, TimedeltaArray],
        # ndarray[Any, Any]]"
        return self._simple_new(left, right, dtype=self.dtype)  # type: ignore[arg-type]

    def __setitem__(self, key, value) -> None:
        value_left, value_right = self._validate_setitem_value(value)
        key = check_array_indexer(self, key)

        self._left[key] = value_left
        self._right[key] = value_right

    def _cmp_method(self, other, op):
        # ensure pandas array for list-like and eliminate non-interval scalars
        if is_list_like(other):
            if len(self) != len(other):
                raise ValueError("Lengths must match to compare")
            other = pd_array(other)
        elif not isinstance(other, Interval):
            # non-interval scalar -> no matches
            if other is NA:
                # GH#31882
                from pandas.core.arrays import BooleanArray

                arr = np.empty(self.shape, dtype=bool)
                mask = np.ones(self.shape, dtype=bool)
                return BooleanArray(arr, mask)
            return invalid_comparison(self, other, op)

        # determine the dtype of the elements we want to compare
        if isinstance(other, Interval):
            other_dtype = pandas_dtype("interval")
        elif not isinstance(other.dtype, CategoricalDtype):
            other_dtype = other.dtype
        else:
            # for categorical defer to categories for dtype
            other_dtype = other.categories.dtype

            # extract intervals if we have interval categories with matching closed
            if isinstance(other_dtype, IntervalDtype):
                if self.closed != other.categories.closed:
                    return invalid_comparison(self, other, op)

                other = other.categories._values.take(
                    other.codes, allow_fill=True, fill_value=other.categories._na_value
                )

        # interval-like -> need same closed and matching endpoints
        if isinstance(other_dtype, IntervalDtype):
            if self.closed != other.closed:
                return invalid_comparison(self, other, op)
            elif not isinstance(other, Interval):
                other = type(self)(other)

            if op is operator.eq:
                return (self._left == other.left) & (self._right == other.right)
            elif op is operator.ne:
                return (self._left != other.left) | (self._right != other.right)
            elif op is operator.gt:
                return (self._left > other.left) | (
                    (self._left == other.left) & (self._right > other.right)
                )
            elif op is operator.ge:
                return (self == other) | (self > other)
            elif op is operator.lt:
                return (self._left < other.left) | (
                    (self._left == other.left) & (self._right < other.right)
                )
            else:
                # operator.le
                return (self == other) | (self < other)

        # non-interval/non-object dtype -> no matches
        if not is_object_dtype(other_dtype):
            return invalid_comparison(self, other, op)

        # object dtype -> iteratively check for intervals
        result = np.zeros(len(self), dtype=bool)
        for i, obj in enumerate(other):
            try:
                result[i] = op(self[i], obj)
            except TypeError:
                if obj is NA:
                    # comparison with np.nan returns NA
                    # github.com/pandas-dev/pandas/pull/37124#discussion_r509095092
                    result = result.astype(object)
                    result[i] = NA
                else:
                    raise
        return result
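
    # Ordering sketch (assumes ``import pandas as pd``): comparisons are
    # lexicographic on (left, right), per the operator branches above.
    #
    # >>> arr = pd.arrays.IntervalArray.from_tuples([(0, 2), (0, 3), (1, 2)])
    # >>> arr > pd.Interval(0, 2)
    # array([False,  True,  True])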

    @unpack_zerodim_and_defer("__eq__")
    def __eq__(self, other):
        return self._cmp_method(other, operator.eq)

    @unpack_zerodim_and_defer("__ne__")
    def __ne__(self, other):
        return self._cmp_method(other, operator.ne)

    @unpack_zerodim_and_defer("__gt__")
    def __gt__(self, other):
        return self._cmp_method(other, operator.gt)

    @unpack_zerodim_and_defer("__ge__")
    def __ge__(self, other):
        return self._cmp_method(other, operator.ge)

    @unpack_zerodim_and_defer("__lt__")
    def __lt__(self, other):
        return self._cmp_method(other, operator.lt)

    @unpack_zerodim_and_defer("__le__")
    def __le__(self, other):
        return self._cmp_method(other, operator.le)

    def argsort(
        self,
        *,
        ascending: bool = True,
        kind: SortKind = "quicksort",
        na_position: str = "last",
        **kwargs,
    ) -> np.ndarray:
        ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)

        if ascending and kind == "quicksort" and na_position == "last":
            # TODO: in an IntervalIndex we can reuse the cached
            #  IntervalTree.left_sorter
            return np.lexsort((self.right, self.left))

        # TODO: other cases we can use lexsort for? much more performant.
        return super().argsort(
            ascending=ascending, kind=kind, na_position=na_position, **kwargs
        )

    def min(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA:
        nv.validate_minmax_axis(axis, self.ndim)

        if not len(self):
            return self._na_value

        mask = self.isna()
        if mask.any():
            if not skipna:
                return self._na_value
            obj = self[~mask]
        else:
            obj = self

        indexer = obj.argsort()[0]
        return obj[indexer]

    def max(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA:
        nv.validate_minmax_axis(axis, self.ndim)

        if not len(self):
            return self._na_value

        mask = self.isna()
        if mask.any():
            if not skipna:
                return self._na_value
            obj = self[~mask]
        else:
            obj = self

        indexer = obj.argsort()[-1]
        return obj[indexer]
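
    # Sorting sketch (assumes ``import pandas as pd``): ``np.lexsort((right,
    # left))`` sorts primarily by left bound, breaking ties by right bound,
    # so min/max pick the lexicographically smallest/largest interval.
    #
    # >>> arr = pd.arrays.IntervalArray.from_tuples([(1, 3), (0, 5), (0, 2)])
    # >>> arr.argsort()
    # array([2, 1, 0])
    # >>> arr.min()
    # Interval(0, 2, closed='right')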

    def _pad_or_backfill(  # pylint: disable=useless-parent-delegation
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove
        #  this method entirely.
        return super()._pad_or_backfill(
            method=method, limit=limit, limit_area=limit_area, copy=copy
        )

    def fillna(
        self, value=None, method=None, limit: int | None = None, copy: bool = True
    ) -> Self:
        """
        Fill NA/NaN values using the specified method.

        Parameters
        ----------
        value : scalar, dict, Series
            If a scalar value is passed it is used to fill all missing values.
            Alternatively, a Series or dict can be used to fill in different
            values for each index. The value should not be a list. The
            value(s) passed should be either Interval objects or NA/NaN.
        method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            (Not implemented yet for IntervalArray)
            Method to use for filling holes in reindexed Series
        limit : int, default None
            (Not implemented yet for IntervalArray)
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill. In other words, if there is
            a gap with more than this number of consecutive NaNs, it will only
            be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled.
        copy : bool, default True
            Whether to make a copy of the data before filling. If False, then
            the original should be modified and no new memory should be allocated.
            For ExtensionArray subclasses that cannot do this, it is at the
            author's discretion whether to ignore "copy=False" or to raise.

        Returns
        -------
        filled : IntervalArray with NA/NaN filled
        """
        if copy is False:
            raise NotImplementedError
        if method is not None:
            return super().fillna(value=value, method=method, limit=limit)

        value_left, value_right = self._validate_scalar(value)

        left = self.left.fillna(value=value_left)
        right = self.right.fillna(value=value_right)
        return self._shallow_copy(left, right)
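
    # Fill sketch (assumes ``import pandas as pd`` and ``import numpy as
    # np``): only Interval objects or NA are accepted as fill values, per
    # _validate_scalar above. Note the NaN entry already made the subtype
    # float64.
    #
    # >>> arr = pd.arrays.IntervalArray.from_tuples([(0, 1), np.nan])
    # >>> arr.fillna(pd.Interval(1, 2))
    # <IntervalArray>
    # [(0.0, 1.0], (1.0, 2.0]]
    # Length: 2, dtype: interval[float64, right]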

    def astype(self, dtype, copy: bool = True):
        """
        Cast to an ExtensionArray or NumPy array with dtype 'dtype'.

        Parameters
        ----------
        dtype : str or dtype
            Typecode or data-type to which the array is cast.

        copy : bool, default True
            Whether to copy the data, even if not necessary. If False,
            a copy is made only if the old dtype does not match the
            new dtype.

        Returns
        -------
        array : ExtensionArray or ndarray
            ExtensionArray or NumPy ndarray with 'dtype' for its dtype.
        """
        from pandas import Index

        if dtype is not None:
            dtype = pandas_dtype(dtype)

        if isinstance(dtype, IntervalDtype):
            if dtype == self.dtype:
                return self.copy() if copy else self

            if is_float_dtype(self.dtype.subtype) and needs_i8_conversion(
                dtype.subtype
            ):
                # This is allowed on the Index.astype but we disallow it here
                msg = (
                    f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
                )
                raise TypeError(msg)

            # need to cast to different subtype
            try:
                # We need to use Index rules for astype to prevent casting
                # np.nan entries to int subtypes
                new_left = Index(self._left, copy=False).astype(dtype.subtype)
                new_right = Index(self._right, copy=False).astype(dtype.subtype)
            except IntCastingNaNError:
                # e.g test_subtype_integer
                raise
            except (TypeError, ValueError) as err:
                # e.g. test_subtype_integer_errors f8->u8 can be lossy
                # and raises ValueError
                msg = (
                    f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
                )
                raise TypeError(msg) from err
            return self._shallow_copy(new_left, new_right)
        else:
            try:
                return super().astype(dtype, copy=copy)
            except (TypeError, ValueError) as err:
                msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
                raise TypeError(msg) from err
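
    # Cast sketch (assumes ``import pandas as pd``): subtype casts go through
    # Index.astype rules, and incompatible subtype combinations raise
    # TypeError as above.
    #
    # >>> arr = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
    # >>> arr.astype("interval[float64, right]").dtype
    # interval[float64, right]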

    def equals(self, other) -> bool:
        if type(self) != type(other):
            return False

        return bool(
            self.closed == other.closed
            and self.left.equals(other.left)
            and self.right.equals(other.right)
        )

    @classmethod
    def _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self:
        """
        Concatenate multiple IntervalArray

        Parameters
        ----------
        to_concat : sequence of IntervalArray

        Returns
        -------
        IntervalArray
        """
        closed_set = {interval.closed for interval in to_concat}
        if len(closed_set) != 1:
            raise ValueError("Intervals must all be closed on the same side.")
        closed = closed_set.pop()

        left: IntervalSide = np.concatenate([interval.left for interval in to_concat])
        right: IntervalSide = np.concatenate([interval.right for interval in to_concat])

        left, right, dtype = cls._ensure_simple_new_inputs(left, right, closed=closed)

        return cls._simple_new(left, right, dtype=dtype)

    def copy(self) -> Self:
        """
        Return a copy of the array.

        Returns
        -------
        IntervalArray
        """
        left = self._left.copy()
        right = self._right.copy()
        dtype = self.dtype
        return self._simple_new(left, right, dtype=dtype)

    def isna(self) -> np.ndarray:
        return isna(self._left)

    def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray:
        if not len(self) or periods == 0:
            return self.copy()

        self._validate_scalar(fill_value)

        # ExtensionArray.shift doesn't work for two reasons
        # 1. IntervalArray.dtype.na_value may not be correct for the dtype.
        # 2. IntervalArray._from_sequence only accepts NaN for missing values,
        #    not other values like NaT

        empty_len = min(abs(periods), len(self))
        if isna(fill_value):
            from pandas import Index

            fill_value = Index(self._left, copy=False)._na_value
            empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))
        else:
            empty = self._from_sequence([fill_value] * empty_len, dtype=self.dtype)

        if periods > 0:
            a = empty
            b = self[:-periods]
        else:
            a = self[abs(periods) :]
            b = empty
        return self._concat_same_type([a, b])

    def take(
        self,
        indices,
        *,
        allow_fill: bool = False,
        fill_value=None,
        axis=None,
        **kwargs,
    ) -> Self:
        """
        Take elements from the IntervalArray.

        Parameters
        ----------
        indices : sequence of integers
            Indices to be taken.

        allow_fill : bool, default False
            How to handle negative values in `indices`.

            * False: negative values in `indices` indicate positional indices
              from the right (the default). This is similar to
              :func:`numpy.take`.

            * True: negative values in `indices` indicate
              missing values. These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.

        fill_value : Interval or NA, optional
            Fill value to use for NA-indices when `allow_fill` is True.
            This may be ``None``, in which case the default NA value for
            the type, ``self.dtype.na_value``, is used.

            For many ExtensionArrays, there will be two representations of
            `fill_value`: a user-facing "boxed" scalar, and a low-level
            physical NA value. `fill_value` should be the user-facing version,
            and the implementation should handle translating that to the
            physical version for processing the take if necessary.

        axis : any, default None
            Present for compat with IntervalIndex; does nothing.

        Returns
        -------
        IntervalArray

        Raises
        ------
        IndexError
            When the indices are out of bounds for the array.
        ValueError
            When `indices` contains negative values other than ``-1``
            and `allow_fill` is True.
        """
        nv.validate_take((), kwargs)

        fill_left = fill_right = fill_value
        if allow_fill:
            fill_left, fill_right = self._validate_scalar(fill_value)

        left_take = take(
            self._left, indices, allow_fill=allow_fill, fill_value=fill_left
        )
        right_take = take(
            self._right, indices, allow_fill=allow_fill, fill_value=fill_right
        )

        return self._shallow_copy(left_take, right_take)
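
    # Take sketch (assumes ``import pandas as pd``): with ``allow_fill=True``,
    # ``-1`` marks a missing slot filled with the NA value validated above.
    #
    # >>> arr = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
    # >>> arr.take([0, -1], allow_fill=True).isna()
    # array([False,  True])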

    def _validate_listlike(self, value):
        # list-like of intervals
        try:
            array = IntervalArray(value)
            self._check_closed_matches(array, name="value")
            value_left, value_right = array.left, array.right
        except TypeError as err:
            # wrong type: not interval or NA
            msg = f"'value' should be an interval type, got {type(value)} instead."
            raise TypeError(msg) from err

        try:
            self.left._validate_fill_value(value_left)
        except (LossySetitemError, TypeError) as err:
            msg = (
                "'value' should be a compatible interval type, "
                f"got {type(value)} instead."
            )
            raise TypeError(msg) from err

        return value_left, value_right

    def _validate_scalar(self, value):
        if isinstance(value, Interval):
            self._check_closed_matches(value, name="value")
            left, right = value.left, value.right
            # TODO: check subdtype match like _validate_setitem_value?
        elif is_valid_na_for_dtype(value, self.left.dtype):
            # GH#18295
            left = right = self.left._na_value
        else:
            raise TypeError(
                "can only insert Interval objects and NA into an IntervalArray"
            )
        return left, right

    def _validate_setitem_value(self, value):
        if is_valid_na_for_dtype(value, self.left.dtype):
            # na value: need special casing to set directly on numpy arrays
            value = self.left._na_value
            if is_integer_dtype(self.dtype.subtype):
                # can't set NaN on a numpy integer array
                # GH#45484 TypeError, not ValueError, matches what we get with
                # non-NA un-holdable value.
                raise TypeError("Cannot set float NaN to integer-backed IntervalArray")
            value_left, value_right = value, value

        elif isinstance(value, Interval):
            # scalar interval
            self._check_closed_matches(value, name="value")
            value_left, value_right = value.left, value.right
            self.left._validate_fill_value(value_left)
            self.left._validate_fill_value(value_right)

        else:
            return self._validate_listlike(value)

        return value_left, value_right

    def value_counts(self, dropna: bool = True) -> Series:
        """
        Returns a Series containing counts of each interval.

        Parameters
        ----------
        dropna : bool, default True
            Don't include counts of NaN.

        Returns
        -------
        counts : Series

        See Also
        --------
        Series.value_counts
        """
        # TODO: implement this in a non-naive way!
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                "The behavior of value_counts with object-dtype is deprecated",
                category=FutureWarning,
            )
            result = value_counts(np.asarray(self), dropna=dropna)
            # Once the deprecation is enforced, we will need to do
            #  `result.index = result.index.astype(self.dtype)`
        return result

    # ---------------------------------------------------------------------
    # Rendering Methods

    def _formatter(self, boxed: bool = False):
        # returning 'str' here causes us to render as e.g. "(0, 1]" instead of
        # "Interval(0, 1, closed='right')"
        return str

    # ---------------------------------------------------------------------
    # Vectorized Interval Properties/Attributes

    @property
    def left(self) -> Index:
        """
        Return the left endpoints of each Interval in the IntervalArray as an Index.

        Examples
        --------

        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
        >>> interv_arr
        <IntervalArray>
        [(0, 1], (2, 5]]
        Length: 2, dtype: interval[int64, right]
        >>> interv_arr.left
        Index([0, 2], dtype='int64')
        """
        from pandas import Index

        return Index(self._left, copy=False)

    @property
    def right(self) -> Index:
        """
        Return the right endpoints of each Interval in the IntervalArray as an Index.

        Examples
        --------

        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
        >>> interv_arr
        <IntervalArray>
        [(0, 1], (2, 5]]
        Length: 2, dtype: interval[int64, right]
        >>> interv_arr.right
        Index([1, 5], dtype='int64')
        """
        from pandas import Index

        return Index(self._right, copy=False)

    @property
    def length(self) -> Index:
        """
        Return an Index with entries denoting the length of each Interval.

        Examples
        --------

        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
        >>> interv_arr
        <IntervalArray>
        [(0, 1], (1, 5]]
        Length: 2, dtype: interval[int64, right]
        >>> interv_arr.length
        Index([1, 4], dtype='int64')
        """
        return self.right - self.left

    @property
    def mid(self) -> Index:
        """
        Return the midpoint of each Interval in the IntervalArray as an Index.

        Examples
        --------

        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
        >>> interv_arr
        <IntervalArray>
        [(0, 1], (1, 5]]
        Length: 2, dtype: interval[int64, right]
        >>> interv_arr.mid
        Index([0.5, 3.0], dtype='float64')
        """
        try:
            return 0.5 * (self.left + self.right)
        except TypeError:
            # datetime safe version
            return self.left + 0.5 * self.length
|
1346 |
+
|
1347 |
+
_interval_shared_docs["overlaps"] = textwrap.dedent(
|
1348 |
+
"""
|
1349 |
+
Check elementwise if an Interval overlaps the values in the %(klass)s.
|
1350 |
+
|
1351 |
+
Two intervals overlap if they share a common point, including closed
|
1352 |
+
endpoints. Intervals that only have an open endpoint in common do not
|
1353 |
+
overlap.
|
1354 |
+
|
1355 |
+
Parameters
|
1356 |
+
----------
|
1357 |
+
other : %(klass)s
|
1358 |
+
Interval to check against for an overlap.
|
1359 |
+
|
1360 |
+
Returns
|
1361 |
+
-------
|
1362 |
+
ndarray
|
1363 |
+
Boolean array positionally indicating where an overlap occurs.
|
1364 |
+
|
1365 |
+
See Also
|
1366 |
+
--------
|
1367 |
+
Interval.overlaps : Check whether two Interval objects overlap.
|
1368 |
+
|
1369 |
+
Examples
|
1370 |
+
--------
|
1371 |
+
%(examples)s
|
1372 |
+
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
|
1373 |
+
array([ True, True, False])
|
1374 |
+
|
1375 |
+
Intervals that share closed endpoints overlap:
|
1376 |
+
|
1377 |
+
>>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
|
1378 |
+
array([ True, True, True])
|
1379 |
+
|
1380 |
+
Intervals that only have an open endpoint in common do not overlap:
|
1381 |
+
|
1382 |
+
>>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
|
1383 |
+
array([False, True, False])
|
1384 |
+
"""
|
1385 |
+
)
|
1386 |
+
|
1387 |
+
@Appender(
|
1388 |
+
_interval_shared_docs["overlaps"]
|
1389 |
+
% {
|
1390 |
+
"klass": "IntervalArray",
|
1391 |
+
"examples": textwrap.dedent(
|
1392 |
+
"""\
|
1393 |
+
>>> data = [(0, 1), (1, 3), (2, 4)]
|
1394 |
+
>>> intervals = pd.arrays.IntervalArray.from_tuples(data)
|
1395 |
+
>>> intervals
|
1396 |
+
<IntervalArray>
|
1397 |
+
[(0, 1], (1, 3], (2, 4]]
|
1398 |
+
Length: 3, dtype: interval[int64, right]
|
1399 |
+
"""
|
1400 |
+
),
|
1401 |
+
}
|
1402 |
+
)
|
1403 |
+
def overlaps(self, other):
|
1404 |
+
if isinstance(other, (IntervalArray, ABCIntervalIndex)):
|
1405 |
+
raise NotImplementedError
|
1406 |
+
if not isinstance(other, Interval):
|
1407 |
+
msg = f"`other` must be Interval-like, got {type(other).__name__}"
|
1408 |
+
raise TypeError(msg)
|
1409 |
+
|
1410 |
+
# equality is okay if both endpoints are closed (overlap at a point)
|
1411 |
+
op1 = le if (self.closed_left and other.closed_right) else lt
|
1412 |
+
op2 = le if (other.closed_left and self.closed_right) else lt
|
1413 |
+
|
1414 |
+
# overlaps is equivalent negation of two interval being disjoint:
|
1415 |
+
# disjoint = (A.left > B.right) or (B.left > A.right)
|
1416 |
+
# (simplifying the negation allows this to be done in less operations)
|
1417 |
+
return op1(self.left, other.right) & op2(other.left, self.right)
|
1418 |
+
|
1419 |
+
# ---------------------------------------------------------------------
|
1420 |
+
|
1421 |
+
@property
|
1422 |
+
def closed(self) -> IntervalClosedType:
|
1423 |
+
"""
|
1424 |
+
String describing the inclusive side the intervals.
|
1425 |
+
|
1426 |
+
Either ``left``, ``right``, ``both`` or ``neither``.
|
1427 |
+
|
1428 |
+
Examples
|
1429 |
+
--------
|
1430 |
+
|
1431 |
+
For arrays:
|
1432 |
+
|
1433 |
+
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
|
1434 |
+
>>> interv_arr
|
1435 |
+
<IntervalArray>
|
1436 |
+
[(0, 1], (1, 5]]
|
1437 |
+
Length: 2, dtype: interval[int64, right]
|
1438 |
+
>>> interv_arr.closed
|
1439 |
+
'right'
|
1440 |
+
|
1441 |
+
For Interval Index:
|
1442 |
+
|
1443 |
+
>>> interv_idx = pd.interval_range(start=0, end=2)
|
1444 |
+
>>> interv_idx
|
1445 |
+
IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
|
1446 |
+
>>> interv_idx.closed
|
1447 |
+
'right'
|
1448 |
+
"""
|
1449 |
+
return self.dtype.closed
|
1450 |
+
|
1451 |
+
_interval_shared_docs["set_closed"] = textwrap.dedent(
|
1452 |
+
"""
|
1453 |
+
Return an identical %(klass)s closed on the specified side.
|
1454 |
+
|
1455 |
+
Parameters
|
1456 |
+
----------
|
1457 |
+
closed : {'left', 'right', 'both', 'neither'}
|
1458 |
+
Whether the intervals are closed on the left-side, right-side, both
|
1459 |
+
or neither.
|
1460 |
+
|
1461 |
+
Returns
|
1462 |
+
-------
|
1463 |
+
%(klass)s
|
1464 |
+
|
1465 |
+
%(examples)s\
|
1466 |
+
"""
|
1467 |
+
)
|
1468 |
+
|
1469 |
+
@Appender(
|
1470 |
+
_interval_shared_docs["set_closed"]
|
1471 |
+
% {
|
1472 |
+
"klass": "IntervalArray",
|
1473 |
+
"examples": textwrap.dedent(
|
1474 |
+
"""\
|
1475 |
+
Examples
|
1476 |
+
--------
|
1477 |
+
>>> index = pd.arrays.IntervalArray.from_breaks(range(4))
|
1478 |
+
>>> index
|
1479 |
+
<IntervalArray>
|
1480 |
+
[(0, 1], (1, 2], (2, 3]]
|
1481 |
+
Length: 3, dtype: interval[int64, right]
|
1482 |
+
>>> index.set_closed('both')
|
1483 |
+
<IntervalArray>
|
1484 |
+
[[0, 1], [1, 2], [2, 3]]
|
1485 |
+
Length: 3, dtype: interval[int64, both]
|
1486 |
+
"""
|
1487 |
+
),
|
1488 |
+
}
|
1489 |
+
)
|
1490 |
+
def set_closed(self, closed: IntervalClosedType) -> Self:
|
1491 |
+
if closed not in VALID_CLOSED:
|
1492 |
+
msg = f"invalid option for 'closed': {closed}"
|
1493 |
+
raise ValueError(msg)
|
1494 |
+
|
1495 |
+
left, right = self._left, self._right
|
1496 |
+
dtype = IntervalDtype(left.dtype, closed=closed)
|
1497 |
+
return self._simple_new(left, right, dtype=dtype)
|
1498 |
+
|
1499 |
+
_interval_shared_docs[
|
1500 |
+
"is_non_overlapping_monotonic"
|
1501 |
+
] = """
|
1502 |
+
Return a boolean whether the %(klass)s is non-overlapping and monotonic.
|
1503 |
+
|
1504 |
+
Non-overlapping means (no Intervals share points), and monotonic means
|
1505 |
+
either monotonic increasing or monotonic decreasing.
|
1506 |
+
|
1507 |
+
Examples
|
1508 |
+
--------
|
1509 |
+
For arrays:
|
1510 |
+
|
1511 |
+
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
|
1512 |
+
>>> interv_arr
|
1513 |
+
<IntervalArray>
|
1514 |
+
[(0, 1], (1, 5]]
|
1515 |
+
Length: 2, dtype: interval[int64, right]
|
1516 |
+
>>> interv_arr.is_non_overlapping_monotonic
|
1517 |
+
True
|
1518 |
+
|
1519 |
+
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
|
1520 |
+
... pd.Interval(-1, 0.1)])
|
1521 |
+
>>> interv_arr
|
1522 |
+
<IntervalArray>
|
1523 |
+
[(0.0, 1.0], (-1.0, 0.1]]
|
1524 |
+
Length: 2, dtype: interval[float64, right]
|
1525 |
+
>>> interv_arr.is_non_overlapping_monotonic
|
1526 |
+
False
|
1527 |
+
|
1528 |
+
For Interval Index:
|
1529 |
+
|
1530 |
+
>>> interv_idx = pd.interval_range(start=0, end=2)
|
1531 |
+
>>> interv_idx
|
1532 |
+
IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
|
1533 |
+
>>> interv_idx.is_non_overlapping_monotonic
|
1534 |
+
True
|
1535 |
+
|
1536 |
+
>>> interv_idx = pd.interval_range(start=0, end=2, closed='both')
|
1537 |
+
>>> interv_idx
|
1538 |
+
IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]')
|
1539 |
+
>>> interv_idx.is_non_overlapping_monotonic
|
1540 |
+
False
|
1541 |
+
"""
|
1542 |
+
|
1543 |
+
@property
|
1544 |
+
@Appender(
|
1545 |
+
_interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs
|
1546 |
+
)
|
1547 |
+
def is_non_overlapping_monotonic(self) -> bool:
|
1548 |
+
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
|
1549 |
+
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
|
1550 |
+
# we already require left <= right
|
1551 |
+
|
1552 |
+
# strict inequality for closed == 'both'; equality implies overlapping
|
1553 |
+
# at a point when both sides of intervals are included
|
1554 |
+
if self.closed == "both":
|
1555 |
+
return bool(
|
1556 |
+
(self._right[:-1] < self._left[1:]).all()
|
1557 |
+
or (self._left[:-1] > self._right[1:]).all()
|
1558 |
+
)
|
1559 |
+
|
1560 |
+
# non-strict inequality when closed != 'both'; at least one side is
|
1561 |
+
# not included in the intervals, so equality does not imply overlapping
|
1562 |
+
return bool(
|
1563 |
+
(self._right[:-1] <= self._left[1:]).all()
|
1564 |
+
or (self._left[:-1] >= self._right[1:]).all()
|
1565 |
+
)
|
1566 |
+
|
1567 |
+
# ---------------------------------------------------------------------
|
1568 |
+
# Conversion
|
1569 |
+
|
1570 |
+
def __array__(
|
1571 |
+
self, dtype: NpDtype | None = None, copy: bool | None = None
|
1572 |
+
) -> np.ndarray:
|
1573 |
+
"""
|
1574 |
+
Return the IntervalArray's data as a numpy array of Interval
|
1575 |
+
objects (with dtype='object')
|
1576 |
+
"""
|
1577 |
+
left = self._left
|
1578 |
+
right = self._right
|
1579 |
+
mask = self.isna()
|
1580 |
+
closed = self.closed
|
1581 |
+
|
1582 |
+
result = np.empty(len(left), dtype=object)
|
1583 |
+
for i, left_value in enumerate(left):
|
1584 |
+
if mask[i]:
|
1585 |
+
result[i] = np.nan
|
1586 |
+
else:
|
1587 |
+
result[i] = Interval(left_value, right[i], closed)
|
1588 |
+
return result
|
1589 |
+
|
1590 |
+
def __arrow_array__(self, type=None):
|
1591 |
+
"""
|
1592 |
+
Convert myself into a pyarrow Array.
|
1593 |
+
"""
|
1594 |
+
import pyarrow
|
1595 |
+
|
1596 |
+
from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
|
1597 |
+
|
1598 |
+
try:
|
1599 |
+
subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
|
1600 |
+
except TypeError as err:
|
1601 |
+
raise TypeError(
|
1602 |
+
f"Conversion to arrow with subtype '{self.dtype.subtype}' "
|
1603 |
+
"is not supported"
|
1604 |
+
) from err
|
1605 |
+
interval_type = ArrowIntervalType(subtype, self.closed)
|
1606 |
+
storage_array = pyarrow.StructArray.from_arrays(
|
1607 |
+
[
|
1608 |
+
pyarrow.array(self._left, type=subtype, from_pandas=True),
|
1609 |
+
pyarrow.array(self._right, type=subtype, from_pandas=True),
|
1610 |
+
],
|
1611 |
+
names=["left", "right"],
|
1612 |
+
)
|
1613 |
+
mask = self.isna()
|
1614 |
+
if mask.any():
|
1615 |
+
# if there are missing values, set validity bitmap also on the array level
|
1616 |
+
null_bitmap = pyarrow.array(~mask).buffers()[1]
|
1617 |
+
storage_array = pyarrow.StructArray.from_buffers(
|
1618 |
+
storage_array.type,
|
1619 |
+
len(storage_array),
|
1620 |
+
[null_bitmap],
|
1621 |
+
children=[storage_array.field(0), storage_array.field(1)],
|
1622 |
+
)
|
1623 |
+
|
1624 |
+
if type is not None:
|
1625 |
+
if type.equals(interval_type.storage_type):
|
1626 |
+
return storage_array
|
1627 |
+
elif isinstance(type, ArrowIntervalType):
|
1628 |
+
# ensure we have the same subtype and closed attributes
|
1629 |
+
if not type.equals(interval_type):
|
1630 |
+
raise TypeError(
|
1631 |
+
"Not supported to convert IntervalArray to type with "
|
1632 |
+
f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
|
1633 |
+
f"and 'closed' ({self.closed} vs {type.closed}) attributes"
|
1634 |
+
)
|
1635 |
+
else:
|
1636 |
+
raise TypeError(
|
1637 |
+
f"Not supported to convert IntervalArray to '{type}' type"
|
1638 |
+
)
|
1639 |
+
|
1640 |
+
return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)
|
1641 |
+
|
1642 |
+
_interval_shared_docs["to_tuples"] = textwrap.dedent(
|
1643 |
+
"""
|
1644 |
+
Return an %(return_type)s of tuples of the form (left, right).
|
1645 |
+
|
1646 |
+
Parameters
|
1647 |
+
----------
|
1648 |
+
na_tuple : bool, default True
|
1649 |
+
If ``True``, return ``NA`` as a tuple ``(nan, nan)``. If ``False``,
|
1650 |
+
just return ``NA`` as ``nan``.
|
1651 |
+
|
1652 |
+
Returns
|
1653 |
+
-------
|
1654 |
+
tuples: %(return_type)s
|
1655 |
+
%(examples)s\
|
1656 |
+
"""
|
1657 |
+
)
|
1658 |
+
|
1659 |
+
@Appender(
|
1660 |
+
_interval_shared_docs["to_tuples"]
|
1661 |
+
% {
|
1662 |
+
"return_type": (
|
1663 |
+
"ndarray (if self is IntervalArray) or Index (if self is IntervalIndex)"
|
1664 |
+
),
|
1665 |
+
"examples": textwrap.dedent(
|
1666 |
+
"""\
|
1667 |
+
|
1668 |
+
Examples
|
1669 |
+
--------
|
1670 |
+
For :class:`pandas.IntervalArray`:
|
1671 |
+
|
1672 |
+
>>> idx = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
|
1673 |
+
>>> idx
|
1674 |
+
<IntervalArray>
|
1675 |
+
[(0, 1], (1, 2]]
|
1676 |
+
Length: 2, dtype: interval[int64, right]
|
1677 |
+
>>> idx.to_tuples()
|
1678 |
+
array([(0, 1), (1, 2)], dtype=object)
|
1679 |
+
|
1680 |
+
For :class:`pandas.IntervalIndex`:
|
1681 |
+
|
1682 |
+
>>> idx = pd.interval_range(start=0, end=2)
|
1683 |
+
>>> idx
|
1684 |
+
IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
|
1685 |
+
>>> idx.to_tuples()
|
1686 |
+
Index([(0, 1), (1, 2)], dtype='object')
|
1687 |
+
"""
|
1688 |
+
),
|
1689 |
+
}
|
1690 |
+
)
|
1691 |
+
def to_tuples(self, na_tuple: bool = True) -> np.ndarray:
|
1692 |
+
tuples = com.asarray_tuplesafe(zip(self._left, self._right))
|
1693 |
+
if not na_tuple:
|
1694 |
+
# GH 18756
|
1695 |
+
tuples = np.where(~self.isna(), tuples, np.nan)
|
1696 |
+
return tuples
|
1697 |
+
|
1698 |
+
# ---------------------------------------------------------------------
|
1699 |
+
|
1700 |
+
def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
|
1701 |
+
value_left, value_right = self._validate_setitem_value(value)
|
1702 |
+
|
1703 |
+
if isinstance(self._left, np.ndarray):
|
1704 |
+
np.putmask(self._left, mask, value_left)
|
1705 |
+
assert isinstance(self._right, np.ndarray)
|
1706 |
+
np.putmask(self._right, mask, value_right)
|
1707 |
+
else:
|
1708 |
+
self._left._putmask(mask, value_left)
|
1709 |
+
assert not isinstance(self._right, np.ndarray)
|
1710 |
+
self._right._putmask(mask, value_right)
|
1711 |
+
|
1712 |
+
def insert(self, loc: int, item: Interval) -> Self:
|
1713 |
+
"""
|
1714 |
+
Return a new IntervalArray inserting new item at location. Follows
|
1715 |
+
Python numpy.insert semantics for negative values. Only Interval
|
1716 |
+
objects and NA can be inserted into an IntervalIndex
|
1717 |
+
|
1718 |
+
Parameters
|
1719 |
+
----------
|
1720 |
+
loc : int
|
1721 |
+
item : Interval
|
1722 |
+
|
1723 |
+
Returns
|
1724 |
+
-------
|
1725 |
+
IntervalArray
|
1726 |
+
"""
|
1727 |
+
left_insert, right_insert = self._validate_scalar(item)
|
1728 |
+
|
1729 |
+
new_left = self.left.insert(loc, left_insert)
|
1730 |
+
new_right = self.right.insert(loc, right_insert)
|
1731 |
+
|
1732 |
+
return self._shallow_copy(new_left, new_right)
|
1733 |
+
|
1734 |
+
def delete(self, loc) -> Self:
|
1735 |
+
if isinstance(self._left, np.ndarray):
|
1736 |
+
new_left = np.delete(self._left, loc)
|
1737 |
+
assert isinstance(self._right, np.ndarray)
|
1738 |
+
new_right = np.delete(self._right, loc)
|
1739 |
+
else:
|
1740 |
+
new_left = self._left.delete(loc)
|
1741 |
+
assert not isinstance(self._right, np.ndarray)
|
1742 |
+
new_right = self._right.delete(loc)
|
1743 |
+
return self._shallow_copy(left=new_left, right=new_right)
|
1744 |
+
|
1745 |
+
@Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
|
1746 |
+
def repeat(
|
1747 |
+
self,
|
1748 |
+
repeats: int | Sequence[int],
|
1749 |
+
axis: AxisInt | None = None,
|
1750 |
+
) -> Self:
|
1751 |
+
nv.validate_repeat((), {"axis": axis})
|
1752 |
+
left_repeat = self.left.repeat(repeats)
|
1753 |
+
right_repeat = self.right.repeat(repeats)
|
1754 |
+
return self._shallow_copy(left=left_repeat, right=right_repeat)
|
1755 |
+
|
1756 |
+
_interval_shared_docs["contains"] = textwrap.dedent(
|
1757 |
+
"""
|
1758 |
+
Check elementwise if the Intervals contain the value.
|
1759 |
+
|
1760 |
+
Return a boolean mask whether the value is contained in the Intervals
|
1761 |
+
of the %(klass)s.
|
1762 |
+
|
1763 |
+
Parameters
|
1764 |
+
----------
|
1765 |
+
other : scalar
|
1766 |
+
The value to check whether it is contained in the Intervals.
|
1767 |
+
|
1768 |
+
Returns
|
1769 |
+
-------
|
1770 |
+
boolean array
|
1771 |
+
|
1772 |
+
See Also
|
1773 |
+
--------
|
1774 |
+
Interval.contains : Check whether Interval object contains value.
|
1775 |
+
%(klass)s.overlaps : Check if an Interval overlaps the values in the
|
1776 |
+
%(klass)s.
|
1777 |
+
|
1778 |
+
Examples
|
1779 |
+
--------
|
1780 |
+
%(examples)s
|
1781 |
+
>>> intervals.contains(0.5)
|
1782 |
+
array([ True, False, False])
|
1783 |
+
"""
|
1784 |
+
)
|
1785 |
+
|
1786 |
+
@Appender(
|
1787 |
+
_interval_shared_docs["contains"]
|
1788 |
+
% {
|
1789 |
+
"klass": "IntervalArray",
|
1790 |
+
"examples": textwrap.dedent(
|
1791 |
+
"""\
|
1792 |
+
>>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
|
1793 |
+
>>> intervals
|
1794 |
+
<IntervalArray>
|
1795 |
+
[(0, 1], (1, 3], (2, 4]]
|
1796 |
+
Length: 3, dtype: interval[int64, right]
|
1797 |
+
"""
|
1798 |
+
),
|
1799 |
+
}
|
1800 |
+
)
|
1801 |
+
def contains(self, other):
|
1802 |
+
if isinstance(other, Interval):
|
1803 |
+
raise NotImplementedError("contains not implemented for two intervals")
|
1804 |
+
|
1805 |
+
return (self._left < other if self.open_left else self._left <= other) & (
|
1806 |
+
other < self._right if self.open_right else other <= self._right
|
1807 |
+
)
|
1808 |
+
|
1809 |
+
def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
|
1810 |
+
if isinstance(values, IntervalArray):
|
1811 |
+
if self.closed != values.closed:
|
1812 |
+
# not comparable -> no overlap
|
1813 |
+
return np.zeros(self.shape, dtype=bool)
|
1814 |
+
|
1815 |
+
if self.dtype == values.dtype:
|
1816 |
+
# GH#38353 instead of casting to object, operating on a
|
1817 |
+
# complex128 ndarray is much more performant.
|
1818 |
+
left = self._combined.view("complex128")
|
1819 |
+
right = values._combined.view("complex128")
|
1820 |
+
# error: Argument 1 to "isin" has incompatible type
|
1821 |
+
# "Union[ExtensionArray, ndarray[Any, Any],
|
1822 |
+
# ndarray[Any, dtype[Any]]]"; expected
|
1823 |
+
# "Union[_SupportsArray[dtype[Any]],
|
1824 |
+
# _NestedSequence[_SupportsArray[dtype[Any]]], bool,
|
1825 |
+
# int, float, complex, str, bytes, _NestedSequence[
|
1826 |
+
# Union[bool, int, float, complex, str, bytes]]]"
|
1827 |
+
return np.isin(left, right).ravel() # type: ignore[arg-type]
|
1828 |
+
|
1829 |
+
elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion(
|
1830 |
+
values.left.dtype
|
1831 |
+
):
|
1832 |
+
# not comparable -> no overlap
|
1833 |
+
return np.zeros(self.shape, dtype=bool)
|
1834 |
+
|
1835 |
+
return isin(self.astype(object), values.astype(object))
|
1836 |
+
|
1837 |
+
@property
|
1838 |
+
def _combined(self) -> IntervalSide:
|
1839 |
+
# error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
|
1840 |
+
# has no attribute "reshape" [union-attr]
|
1841 |
+
left = self.left._values.reshape(-1, 1) # type: ignore[union-attr]
|
1842 |
+
right = self.right._values.reshape(-1, 1) # type: ignore[union-attr]
|
1843 |
+
if needs_i8_conversion(left.dtype):
|
1844 |
+
# error: Item "ndarray[Any, Any]" of "Any | ndarray[Any, Any]" has
|
1845 |
+
# no attribute "_concat_same_type"
|
1846 |
+
comb = left._concat_same_type( # type: ignore[union-attr]
|
1847 |
+
[left, right], axis=1
|
1848 |
+
)
|
1849 |
+
else:
|
1850 |
+
comb = np.concatenate([left, right], axis=1)
|
1851 |
+
return comb
|
1852 |
+
|
1853 |
+
def _from_combined(self, combined: np.ndarray) -> IntervalArray:
|
1854 |
+
"""
|
1855 |
+
Create a new IntervalArray with our dtype from a 1D complex128 ndarray.
|
1856 |
+
"""
|
1857 |
+
nc = combined.view("i8").reshape(-1, 2)
|
1858 |
+
|
1859 |
+
dtype = self._left.dtype
|
1860 |
+
if needs_i8_conversion(dtype):
|
1861 |
+
assert isinstance(self._left, (DatetimeArray, TimedeltaArray))
|
1862 |
+
new_left = type(self._left)._from_sequence(nc[:, 0], dtype=dtype)
|
1863 |
+
assert isinstance(self._right, (DatetimeArray, TimedeltaArray))
|
1864 |
+
new_right = type(self._right)._from_sequence(nc[:, 1], dtype=dtype)
|
1865 |
+
else:
|
1866 |
+
assert isinstance(dtype, np.dtype)
|
1867 |
+
new_left = nc[:, 0].view(dtype)
|
1868 |
+
new_right = nc[:, 1].view(dtype)
|
1869 |
+
return self._shallow_copy(left=new_left, right=new_right)
|
1870 |
+
|
1871 |
+
def unique(self) -> IntervalArray:
|
1872 |
+
# No overload variant of "__getitem__" of "ExtensionArray" matches argument
|
1873 |
+
# type "Tuple[slice, int]"
|
1874 |
+
nc = unique(
|
1875 |
+
self._combined.view("complex128")[:, 0] # type: ignore[call-overload]
|
1876 |
+
)
|
1877 |
+
nc = nc[:, None]
|
1878 |
+
return self._from_combined(nc)
|
1879 |
+
|
1880 |
+
|
1881 |
+
def _maybe_convert_platform_interval(values) -> ArrayLike:
|
1882 |
+
"""
|
1883 |
+
Try to do platform conversion, with special casing for IntervalArray.
|
1884 |
+
Wrapper around maybe_convert_platform that alters the default return
|
1885 |
+
dtype in certain cases to be compatible with IntervalArray. For example,
|
1886 |
+
empty lists return with integer dtype instead of object dtype, which is
|
1887 |
+
prohibited for IntervalArray.
|
1888 |
+
|
1889 |
+
Parameters
|
1890 |
+
----------
|
1891 |
+
values : array-like
|
1892 |
+
|
1893 |
+
Returns
|
1894 |
+
-------
|
1895 |
+
array
|
1896 |
+
"""
|
1897 |
+
if isinstance(values, (list, tuple)) and len(values) == 0:
|
1898 |
+
# GH 19016
|
1899 |
+
# empty lists/tuples get object dtype by default, but this is
|
1900 |
+
# prohibited for IntervalArray, so coerce to integer instead
|
1901 |
+
return np.array([], dtype=np.int64)
|
1902 |
+
elif not is_list_like(values) or isinstance(values, ABCDataFrame):
|
1903 |
+
# This will raise later, but we avoid passing to maybe_convert_platform
|
1904 |
+
return values
|
1905 |
+
elif isinstance(getattr(values, "dtype", None), CategoricalDtype):
|
1906 |
+
values = np.asarray(values)
|
1907 |
+
elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)):
|
1908 |
+
# TODO: should we just cast these to list?
|
1909 |
+
return values
|
1910 |
+
else:
|
1911 |
+
values = extract_array(values, extract_numpy=True)
|
1912 |
+
|
1913 |
+
if not hasattr(values, "dtype"):
|
1914 |
+
values = np.asarray(values)
|
1915 |
+
if values.dtype.kind in "iu" and values.dtype != np.int64:
|
1916 |
+
values = values.astype(np.int64)
|
1917 |
+
return values
|
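
A quick usage sketch for the IntervalArray methods above (illustrative and not part of the diff; it assumes a pandas 2.x build matching this vendored source, and the expected outputs mirror the docstrings):

    import pandas as pd

    # Three right-closed intervals, as in the shared docstring examples.
    intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])

    # overlaps(): elementwise check of each interval against a scalar Interval.
    intervals.overlaps(pd.Interval(0.5, 1.5))  # array([ True,  True, False])

    # contains(): elementwise scalar membership, honoring open/closed endpoints.
    intervals.contains(0.5)  # array([ True, False, False])

    # set_closed(): same endpoints under a new closedness (a new array/dtype).
    intervals.set_closed("both").closed  # 'both'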
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/masked.py
ADDED
@@ -0,0 +1,1650 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    overload,
)
import warnings

import numpy as np

from pandas._libs import (
    lib,
    missing as libmissing,
)
from pandas._libs.tslibs import is_supported_dtype
from pandas._typing import (
    ArrayLike,
    AstypeArg,
    AxisInt,
    DtypeObj,
    FillnaOptions,
    InterpolateOptions,
    NpDtype,
    PositionalIndexer,
    Scalar,
    ScalarIndexer,
    Self,
    SequenceIndexer,
    Shape,
    npt,
)
from pandas.compat import (
    IS64,
    is_platform_windows,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas.util._validators import validate_fillna_kwargs

from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
    is_bool,
    is_integer_dtype,
    is_list_like,
    is_scalar,
    is_string_dtype,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import BaseMaskedDtype
from pandas.core.dtypes.missing import (
    array_equivalent,
    is_valid_na_for_dtype,
    isna,
    notna,
)

from pandas.core import (
    algorithms as algos,
    arraylike,
    missing,
    nanops,
    ops,
)
from pandas.core.algorithms import (
    factorize_array,
    isin,
    map_array,
    mode,
    take,
)
from pandas.core.array_algos import (
    masked_accumulations,
    masked_reductions,
)
from pandas.core.array_algos.quantile import quantile_with_mask
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._utils import to_numpy_dtype_inference
from pandas.core.arrays.base import ExtensionArray
from pandas.core.construction import (
    array as pd_array,
    ensure_wrapped_if_datetimelike,
    extract_array,
)
from pandas.core.indexers import check_array_indexer
from pandas.core.ops import invalid_comparison
from pandas.core.util.hashing import hash_array

if TYPE_CHECKING:
    from collections.abc import (
        Iterator,
        Sequence,
    )
    from pandas import Series
    from pandas.core.arrays import BooleanArray
    from pandas._typing import (
        NumpySorter,
        NumpyValueArrayLike,
    )
    from pandas.core.arrays import FloatingArray

from pandas.compat.numpy import function as nv


class BaseMaskedArray(OpsMixin, ExtensionArray):
    """
    Base class for masked arrays (which use _data and _mask to store the data).

    numpy based
    """

    # The value used to fill '_data' to avoid upcasting
    _internal_fill_value: Scalar
    # our underlying data and mask are each ndarrays
    _data: np.ndarray
    _mask: npt.NDArray[np.bool_]

    # Fill values used for any/all
    _truthy_value = Scalar  # bool(_truthy_value) = True
    _falsey_value = Scalar  # bool(_falsey_value) = False

    @classmethod
    def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
        result = BaseMaskedArray.__new__(cls)
        result._data = values
        result._mask = mask
        return result

    def __init__(
        self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
    ) -> None:
        # values is supposed to already be validated in the subclass
        if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_):
            raise TypeError(
                "mask should be boolean numpy array. Use "
                "the 'pd.array' function instead"
            )
        if values.shape != mask.shape:
            raise ValueError("values.shape must match mask.shape")

        if copy:
            values = values.copy()
            mask = mask.copy()

        self._data = values
        self._mask = mask

    @classmethod
    def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False) -> Self:
        values, mask = cls._coerce_to_array(scalars, dtype=dtype, copy=copy)
        return cls(values, mask)

    @classmethod
    @doc(ExtensionArray._empty)
    def _empty(cls, shape: Shape, dtype: ExtensionDtype):
        values = np.empty(shape, dtype=dtype.type)
        values.fill(cls._internal_fill_value)
        mask = np.ones(shape, dtype=bool)
        result = cls(values, mask)
        if not isinstance(result, cls) or dtype != result.dtype:
            raise NotImplementedError(
                f"Default 'empty' implementation is invalid for dtype='{dtype}'"
            )
        return result

    def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
        # NEP 51: https://github.com/numpy/numpy/pull/22449
        return str

    @property
    def dtype(self) -> BaseMaskedDtype:
        raise AbstractMethodError(self)

    @overload
    def __getitem__(self, item: ScalarIndexer) -> Any:
        ...

    @overload
    def __getitem__(self, item: SequenceIndexer) -> Self:
        ...

    def __getitem__(self, item: PositionalIndexer) -> Self | Any:
        item = check_array_indexer(self, item)

        newmask = self._mask[item]
        if is_bool(newmask):
            # This is a scalar indexing
            if newmask:
                return self.dtype.na_value
            return self._data[item]

        return self._simple_new(self._data[item], newmask)

    def _pad_or_backfill(
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        mask = self._mask

        if mask.any():
            func = missing.get_fill_func(method, ndim=self.ndim)

            npvalues = self._data.T
            new_mask = mask.T
            if copy:
                npvalues = npvalues.copy()
                new_mask = new_mask.copy()
            elif limit_area is not None:
                mask = mask.copy()
            func(npvalues, limit=limit, mask=new_mask)

            if limit_area is not None and not mask.all():
                mask = mask.T
                neg_mask = ~mask
                first = neg_mask.argmax()
                last = len(neg_mask) - neg_mask[::-1].argmax() - 1
                if limit_area == "inside":
                    new_mask[:first] |= mask[:first]
                    new_mask[last + 1 :] |= mask[last + 1 :]
                elif limit_area == "outside":
                    new_mask[first + 1 : last] |= mask[first + 1 : last]

            if copy:
                return self._simple_new(npvalues.T, new_mask.T)
            else:
                return self
        else:
            if copy:
                new_values = self.copy()
            else:
                new_values = self
            return new_values

    @doc(ExtensionArray.fillna)
    def fillna(
        self, value=None, method=None, limit: int | None = None, copy: bool = True
    ) -> Self:
        value, method = validate_fillna_kwargs(value, method)

        mask = self._mask

        value = missing.check_value_size(value, mask, len(self))

        if mask.any():
            if method is not None:
                func = missing.get_fill_func(method, ndim=self.ndim)
                npvalues = self._data.T
                new_mask = mask.T
                if copy:
                    npvalues = npvalues.copy()
                    new_mask = new_mask.copy()
                func(npvalues, limit=limit, mask=new_mask)
                return self._simple_new(npvalues.T, new_mask.T)
            else:
                # fill with value
                if copy:
                    new_values = self.copy()
                else:
                    new_values = self[:]
                new_values[mask] = value
        else:
            if copy:
                new_values = self.copy()
            else:
                new_values = self[:]
        return new_values
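
    # Illustrative sketch (an editorial addition, not from the upstream file):
    # fillna() behavior on a concrete masked subclass such as IntegerArray,
    # which pd.array(..., dtype="Int64") constructs.
    #
    #   >>> arr = pd.array([1, None, 3], dtype="Int64")
    #   >>> arr.fillna(0)
    #   <IntegerArray>
    #   [1, 0, 3]
    #   Length: 3, dtype: Int64
    #   >>> arr.fillna(method="ffill")  # may warn on newer pandas
    #   <IntegerArray>
    #   [1, 1, 3]
    #   Length: 3, dtype: Int64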

    @classmethod
    def _coerce_to_array(
        cls, values, *, dtype: DtypeObj, copy: bool = False
    ) -> tuple[np.ndarray, np.ndarray]:
        raise AbstractMethodError(cls)

    def _validate_setitem_value(self, value):
        """
        Check if we have a scalar that we can cast losslessly.

        Raises
        ------
        TypeError
        """
        kind = self.dtype.kind
        # TODO: get this all from np_can_hold_element?
        if kind == "b":
            if lib.is_bool(value):
                return value

        elif kind == "f":
            if lib.is_integer(value) or lib.is_float(value):
                return value

        else:
            if lib.is_integer(value) or (lib.is_float(value) and value.is_integer()):
                return value
            # TODO: unsigned checks

        # Note: without the "str" here, the f-string rendering raises in
        #  py38 builds.
        raise TypeError(f"Invalid value '{str(value)}' for dtype {self.dtype}")

    def __setitem__(self, key, value) -> None:
        key = check_array_indexer(self, key)

        if is_scalar(value):
            if is_valid_na_for_dtype(value, self.dtype):
                self._mask[key] = True
            else:
                value = self._validate_setitem_value(value)
                self._data[key] = value
                self._mask[key] = False
            return

        value, mask = self._coerce_to_array(value, dtype=self.dtype)

        self._data[key] = value
        self._mask[key] = mask
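
    # Illustrative sketch (not from the upstream file): __setitem__ leans on
    # _validate_setitem_value for lossless scalar casts; e.g. with "Int64":
    #
    #   >>> arr = pd.array([1, 2, 3], dtype="Int64")
    #   >>> arr[0] = 10      # OK: integer is held losslessly
    #   >>> arr[1] = None    # OK: NA only flips the mask
    #   >>> arr[2] = 1.5     # TypeError: Invalid value '1.5' for dtype Int64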

    def __contains__(self, key) -> bool:
        if isna(key) and key is not self.dtype.na_value:
            # GH#52840
            if self._data.dtype.kind == "f" and lib.is_float(key):
                return bool((np.isnan(self._data) & ~self._mask).any())

        return bool(super().__contains__(key))

    def __iter__(self) -> Iterator:
        if self.ndim == 1:
            if not self._hasna:
                for val in self._data:
                    yield val
            else:
                na_value = self.dtype.na_value
                for isna_, val in zip(self._mask, self._data):
                    if isna_:
                        yield na_value
                    else:
                        yield val
        else:
            for i in range(len(self)):
                yield self[i]

    def __len__(self) -> int:
        return len(self._data)

    @property
    def shape(self) -> Shape:
        return self._data.shape

    @property
    def ndim(self) -> int:
        return self._data.ndim

    def swapaxes(self, axis1, axis2) -> Self:
        data = self._data.swapaxes(axis1, axis2)
        mask = self._mask.swapaxes(axis1, axis2)
        return self._simple_new(data, mask)

    def delete(self, loc, axis: AxisInt = 0) -> Self:
        data = np.delete(self._data, loc, axis=axis)
        mask = np.delete(self._mask, loc, axis=axis)
        return self._simple_new(data, mask)

    def reshape(self, *args, **kwargs) -> Self:
        data = self._data.reshape(*args, **kwargs)
        mask = self._mask.reshape(*args, **kwargs)
        return self._simple_new(data, mask)

    def ravel(self, *args, **kwargs) -> Self:
        # TODO: need to make sure we have the same order for data/mask
        data = self._data.ravel(*args, **kwargs)
        mask = self._mask.ravel(*args, **kwargs)
        return type(self)(data, mask)

    @property
    def T(self) -> Self:
        return self._simple_new(self._data.T, self._mask.T)

    def round(self, decimals: int = 0, *args, **kwargs):
        """
        Round each value in the array to the given number of decimals.

        Parameters
        ----------
        decimals : int, default 0
            Number of decimal places to round to. If decimals is negative,
            it specifies the number of positions to the left of the decimal point.
        *args, **kwargs
            Additional arguments and keywords have no effect but might be
            accepted for compatibility with NumPy.

        Returns
        -------
        NumericArray
            Rounded values of the NumericArray.

        See Also
        --------
        numpy.around : Round values of an np.array.
        DataFrame.round : Round values of a DataFrame.
        Series.round : Round values of a Series.
        """
        if self.dtype.kind == "b":
            return self
        nv.validate_round(args, kwargs)
        values = np.round(self._data, decimals=decimals, **kwargs)

        # Usually we'll get same type as self, but ndarray[bool] casts to float
        return self._maybe_mask_result(values, self._mask.copy())

    # ------------------------------------------------------------------
    # Unary Methods

    def __invert__(self) -> Self:
        return self._simple_new(~self._data, self._mask.copy())

    def __neg__(self) -> Self:
        return self._simple_new(-self._data, self._mask.copy())

    def __pos__(self) -> Self:
        return self.copy()

    def __abs__(self) -> Self:
        return self._simple_new(abs(self._data), self._mask.copy())

    # ------------------------------------------------------------------

    def _values_for_json(self) -> np.ndarray:
        return np.asarray(self, dtype=object)

    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        """
        Convert to a NumPy Array.

        By default converts to an object-dtype NumPy array. Specify the `dtype` and
        `na_value` keywords to customize the conversion.

        Parameters
        ----------
        dtype : dtype, default object
            The numpy dtype to convert to.
        copy : bool, default False
            Whether to ensure that the returned value is not a view on
            the array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary. This is typically
            only possible when no missing values are present and `dtype`
            is the equivalent numpy dtype.
        na_value : scalar, optional
            Scalar missing value indicator to use in numpy array. Defaults
            to the native missing value indicator of this array (pd.NA).

        Returns
        -------
        numpy.ndarray

        Examples
        --------
        An object-dtype is the default result

        >>> a = pd.array([True, False, pd.NA], dtype="boolean")
        >>> a.to_numpy()
        array([True, False, <NA>], dtype=object)

        When no missing values are present, an equivalent dtype can be used.

        >>> pd.array([True, False], dtype="boolean").to_numpy(dtype="bool")
        array([ True, False])
        >>> pd.array([1, 2], dtype="Int64").to_numpy("int64")
        array([1, 2])

        However, requesting such dtype will raise a ValueError if
        missing values are present and the default missing value :attr:`NA`
        is used.

        >>> a = pd.array([True, False, pd.NA], dtype="boolean")
        >>> a
        <BooleanArray>
        [True, False, <NA>]
        Length: 3, dtype: boolean

        >>> a.to_numpy(dtype="bool")
        Traceback (most recent call last):
        ...
        ValueError: cannot convert to bool numpy array in presence of missing values

        Specify a valid `na_value` instead

        >>> a.to_numpy(dtype="bool", na_value=False)
        array([ True, False, False])
        """
        hasna = self._hasna
        dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, hasna)
        if dtype is None:
            dtype = object

        if hasna:
            if (
                dtype != object
                and not is_string_dtype(dtype)
                and na_value is libmissing.NA
            ):
                raise ValueError(
                    f"cannot convert to '{dtype}'-dtype NumPy array "
                    "with missing values. Specify an appropriate 'na_value' "
                    "for this dtype."
                )
            # don't pass copy to astype -> always need a copy since we are mutating
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                data = self._data.astype(dtype)
            data[self._mask] = na_value
        else:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                data = self._data.astype(dtype, copy=copy)
        return data

    @doc(ExtensionArray.tolist)
    def tolist(self):
        if self.ndim > 1:
            return [x.tolist() for x in self]
        dtype = None if self._hasna else self._data.dtype
        return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist()

    @overload
    def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
        ...

    @overload
    def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
        ...

    @overload
    def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
        ...

    def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
        dtype = pandas_dtype(dtype)

        if dtype == self.dtype:
            if copy:
                return self.copy()
            return self

        # if we are astyping to another nullable masked dtype, we can fastpath
        if isinstance(dtype, BaseMaskedDtype):
            # TODO deal with NaNs for FloatingArray case
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                # TODO: Is rounding what we want long term?
                data = self._data.astype(dtype.numpy_dtype, copy=copy)
            # mask is copied depending on whether the data was copied, and
            # not directly depending on the `copy` keyword
            mask = self._mask if data is self._data else self._mask.copy()
            cls = dtype.construct_array_type()
            return cls(data, mask, copy=False)

        if isinstance(dtype, ExtensionDtype):
            eacls = dtype.construct_array_type()
            return eacls._from_sequence(self, dtype=dtype, copy=copy)

        na_value: float | np.datetime64 | lib.NoDefault

        # coerce
        if dtype.kind == "f":
            # In astype, we consider dtype=float to also mean na_value=np.nan
            na_value = np.nan
        elif dtype.kind == "M":
            na_value = np.datetime64("NaT")
        else:
            na_value = lib.no_default

        # to_numpy will also raise, but we get somewhat nicer exception messages here
        if dtype.kind in "iu" and self._hasna:
            raise ValueError("cannot convert NA to integer")
        if dtype.kind == "b" and self._hasna:
            # careful: astype_nansafe converts np.nan to True
            raise ValueError("cannot convert float NaN to bool")

        data = self.to_numpy(dtype=dtype, na_value=na_value, copy=copy)
        return data

    __array_priority__ = 1000  # higher than ndarray so ops dispatch to us

    def __array__(
        self, dtype: NpDtype | None = None, copy: bool | None = None
    ) -> np.ndarray:
        """
        the array interface, return my values
        We return an object array here to preserve our scalar values
        """
        return self.to_numpy(dtype=dtype)

    _HANDLED_TYPES: tuple[type, ...]

    def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
        # For MaskedArray inputs, we apply the ufunc to ._data
        # and mask the result.

        out = kwargs.get("out", ())

        for x in inputs + out:
            if not isinstance(x, self._HANDLED_TYPES + (BaseMaskedArray,)):
                return NotImplemented

        # for binary ops, use our custom dunder methods
        result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is not NotImplemented:
            return result

        if "out" in kwargs:
            # e.g. test_ufunc_with_out
            return arraylike.dispatch_ufunc_with_out(
                self, ufunc, method, *inputs, **kwargs
            )

        if method == "reduce":
            result = arraylike.dispatch_reduction_ufunc(
                self, ufunc, method, *inputs, **kwargs
            )
            if result is not NotImplemented:
                return result

        mask = np.zeros(len(self), dtype=bool)
        inputs2 = []
        for x in inputs:
            if isinstance(x, BaseMaskedArray):
                mask |= x._mask
                inputs2.append(x._data)
            else:
                inputs2.append(x)

        def reconstruct(x: np.ndarray):
            # we don't worry about scalar `x` here, since we
            # raise for reduce up above.
            from pandas.core.arrays import (
                BooleanArray,
                FloatingArray,
                IntegerArray,
            )

            if x.dtype.kind == "b":
                m = mask.copy()
                return BooleanArray(x, m)
            elif x.dtype.kind in "iu":
                m = mask.copy()
                return IntegerArray(x, m)
            elif x.dtype.kind == "f":
                m = mask.copy()
                if x.dtype == np.float16:
                    # reached in e.g. np.sqrt on BooleanArray
                    # we don't support float16
                    x = x.astype(np.float32)
                return FloatingArray(x, m)
            else:
                x[mask] = np.nan
            return x

        result = getattr(ufunc, method)(*inputs2, **kwargs)
        if ufunc.nout > 1:
            # e.g. np.divmod
            return tuple(reconstruct(x) for x in result)
        elif method == "reduce":
            # e.g. np.add.reduce; test_ufunc_reduce_raises
            if self._mask.any():
                return self._na_value
            return result
        else:
            return reconstruct(result)

    def __arrow_array__(self, type=None):
        """
        Convert myself into a pyarrow Array.
        """
        import pyarrow as pa

        return pa.array(self._data, mask=self._mask, type=type)

    @property
    def _hasna(self) -> bool:
        # Note: this is expensive right now! The hope is that we can
        # make this faster by having an optional mask, but not have to change
        # source code using it.

        # error: Incompatible return value type (got "bool_", expected "bool")
        return self._mask.any()  # type: ignore[return-value]

    def _propagate_mask(
        self, mask: npt.NDArray[np.bool_] | None, other
    ) -> npt.NDArray[np.bool_]:
        if mask is None:
            mask = self._mask.copy()  # TODO: need test for BooleanArray needing a copy
            if other is libmissing.NA:
                # GH#45421 don't alter inplace
                mask = mask | True
            elif is_list_like(other) and len(other) == len(mask):
                mask = mask | isna(other)
        else:
            mask = self._mask | mask
        # Incompatible return value type (got "Optional[ndarray[Any, dtype[bool_]]]",
        # expected "ndarray[Any, dtype[bool_]]")
        return mask  # type: ignore[return-value]

    def _arith_method(self, other, op):
        op_name = op.__name__
        omask = None

        if (
            not hasattr(other, "dtype")
            and is_list_like(other)
            and len(other) == len(self)
        ):
            # Try inferring masked dtype instead of casting to object
            other = pd_array(other)
            other = extract_array(other, extract_numpy=True)

        if isinstance(other, BaseMaskedArray):
            other, omask = other._data, other._mask

        elif is_list_like(other):
            if not isinstance(other, ExtensionArray):
                other = np.asarray(other)
            if other.ndim > 1:
                raise NotImplementedError("can only perform ops with 1-d structures")

        # We wrap the non-masked arithmetic logic used for numpy dtypes
        # in Series/Index arithmetic ops.
        other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
        pd_op = ops.get_array_op(op)
        other = ensure_wrapped_if_datetimelike(other)

        if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
            # Avoid DeprecationWarning: In future, it will be an error
            # for 'np.bool_' scalars to be interpreted as an index
            # e.g. test_array_scalar_like_equivalence
            other = bool(other)

        mask = self._propagate_mask(omask, other)

        if other is libmissing.NA:
            result = np.ones_like(self._data)
            if self.dtype.kind == "b":
                if op_name in {
                    "floordiv",
                    "rfloordiv",
                    "pow",
                    "rpow",
                    "truediv",
                    "rtruediv",
                }:
                    # GH#41165 Try to match non-masked Series behavior
                    # This is still imperfect GH#46043
                    raise NotImplementedError(
                        f"operator '{op_name}' not implemented for bool dtypes"
                    )
                if op_name in {"mod", "rmod"}:
                    dtype = "int8"
                else:
                    dtype = "bool"
                result = result.astype(dtype)
            elif "truediv" in op_name and self.dtype.kind != "f":
                # The actual data here doesn't matter since the mask
                # will be all-True, but since this is division, we want
                # to end up with floating dtype.
                result = result.astype(np.float64)
        else:
            # Make sure we do this before the "pow" mask checks
            # to get an expected exception message on shape mismatch.
            if self.dtype.kind in "iu" and op_name in ["floordiv", "mod"]:
                # TODO(GH#30188) ATM we don't match the behavior of non-masked
                # types with respect to floordiv-by-zero
                pd_op = op

            with np.errstate(all="ignore"):
                result = pd_op(self._data, other)

        if op_name == "pow":
            # 1 ** x is 1.
            mask = np.where((self._data == 1) & ~self._mask, False, mask)
            # x ** 0 is 1.
            if omask is not None:
                mask = np.where((other == 0) & ~omask, False, mask)
            elif other is not libmissing.NA:
                mask = np.where(other == 0, False, mask)

        elif op_name == "rpow":
            # 1 ** x is 1.
            if omask is not None:
                mask = np.where((other == 1) & ~omask, False, mask)
            elif other is not libmissing.NA:
                mask = np.where(other == 1, False, mask)
            # x ** 0 is 1.
            mask = np.where((self._data == 0) & ~self._mask, False, mask)

        return self._maybe_mask_result(result, mask)
809 |
+
|
810 |
+
_logical_method = _arith_method
|
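Taken together, `_propagate_mask` and `_arith_method` give nullable arithmetic its NA semantics: NA in either operand (or a bare `pd.NA` scalar) comes back as NA, except where the `pow`/`rpow` identities force a definite answer. A minimal sketch of the observable behavior through the public `pd.array` constructor, assuming a pandas 2.x build like the one vendored here:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    a = pd.array([1, 2, None], dtype="Int64")
    print(a + 1)      # [2, 3, <NA>]: mask propagated from a
    print(a * pd.NA)  # [<NA>, <NA>, <NA>]: scalar NA masks everything

    # pow identities clear the mask: 1 ** anything is 1, anything ** 0 is 1
    print(pd.array([1, None], dtype="Int64") ** pd.NA)  # [1, <NA>]
    print(pd.array([None], dtype="Int64") ** 0)         # [1]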

    def _cmp_method(self, other, op) -> BooleanArray:
        from pandas.core.arrays import BooleanArray

        mask = None

        if isinstance(other, BaseMaskedArray):
            other, mask = other._data, other._mask

        elif is_list_like(other):
            other = np.asarray(other)
            if other.ndim > 1:
                raise NotImplementedError("can only perform ops with 1-d structures")
            if len(self) != len(other):
                raise ValueError("Lengths must match to compare")

        if other is libmissing.NA:
            # numpy does not handle pd.NA well as "other" scalar (it returns
            # a scalar False instead of an array)
            # This may be fixed by NA.__array_ufunc__. Revisit this check
            # once that's implemented.
            result = np.zeros(self._data.shape, dtype="bool")
            mask = np.ones(self._data.shape, dtype="bool")
        else:
            with warnings.catch_warnings():
                # numpy may show a FutureWarning or DeprecationWarning:
                #     elementwise comparison failed; returning scalar instead,
                #     but in the future will perform elementwise comparison
                # before returning NotImplemented. We fall back to the correct
                # behavior today, so that should be fine to ignore.
                warnings.filterwarnings("ignore", "elementwise", FutureWarning)
                warnings.filterwarnings("ignore", "elementwise", DeprecationWarning)
                method = getattr(self._data, f"__{op.__name__}__")
                result = method(other)

                if result is NotImplemented:
                    result = invalid_comparison(self._data, other, op)

        mask = self._propagate_mask(mask, other)
        return BooleanArray(result, mask, copy=False)
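Comparisons follow the same mask-propagation pattern but always wrap the result in a `BooleanArray`, with the special case that comparing against the scalar `pd.NA` yields an all-NA result rather than NumPy's scalar `False`:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    a = pd.array([1, 2, None], dtype="Int64")
    print(a == 1)      # [True, False, <NA>]  (BooleanArray)
    print(a == pd.NA)  # [<NA>, <NA>, <NA>]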

    def _maybe_mask_result(
        self, result: np.ndarray | tuple[np.ndarray, np.ndarray], mask: np.ndarray
    ):
        """
        Parameters
        ----------
        result : array-like or tuple[array-like]
        mask : array-like bool
        """
        if isinstance(result, tuple):
            # i.e. divmod
            div, mod = result
            return (
                self._maybe_mask_result(div, mask),
                self._maybe_mask_result(mod, mask),
            )

        if result.dtype.kind == "f":
            from pandas.core.arrays import FloatingArray

            return FloatingArray(result, mask, copy=False)

        elif result.dtype.kind == "b":
            from pandas.core.arrays import BooleanArray

            return BooleanArray(result, mask, copy=False)

        elif lib.is_np_dtype(result.dtype, "m") and is_supported_dtype(result.dtype):
            # e.g. test_numeric_arr_mul_tdscalar_numexpr_path
            from pandas.core.arrays import TimedeltaArray

            result[mask] = result.dtype.type("NaT")

            if not isinstance(result, TimedeltaArray):
                return TimedeltaArray._simple_new(result, dtype=result.dtype)

            return result

        elif result.dtype.kind in "iu":
            from pandas.core.arrays import IntegerArray

            return IntegerArray(result, mask, copy=False)

        else:
            result[mask] = np.nan
            return result
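`_maybe_mask_result` is what keeps op results inside the masked family: a float result becomes a `FloatingArray`, bool a `BooleanArray`, int/uint an `IntegerArray`, and a tuple (from `divmod`) is wrapped element-wise. That dispatch is why true division of an `Int64` array lands on `Float64`. A short sketch:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    a = pd.array([1, 2, None], dtype="Int64")
    print((a / 2).dtype)         # Float64: float result -> FloatingArray
    print((a > 1).dtype)         # boolean: bool result -> BooleanArray
    div, mod = divmod(a, 2)      # tuple results are wrapped element-wise
    print(div.dtype, mod.dtype)  # Int64 Int64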

    def isna(self) -> np.ndarray:
        return self._mask.copy()

    @property
    def _na_value(self):
        return self.dtype.na_value

    @property
    def nbytes(self) -> int:
        return self._data.nbytes + self._mask.nbytes

    @classmethod
    def _concat_same_type(
        cls,
        to_concat: Sequence[Self],
        axis: AxisInt = 0,
    ) -> Self:
        data = np.concatenate([x._data for x in to_concat], axis=axis)
        mask = np.concatenate([x._mask for x in to_concat], axis=axis)
        return cls(data, mask)

    def _hash_pandas_object(
        self, *, encoding: str, hash_key: str, categorize: bool
    ) -> npt.NDArray[np.uint64]:
        hashed_array = hash_array(
            self._data, encoding=encoding, hash_key=hash_key, categorize=categorize
        )
        hashed_array[self.isna()] = hash(self.dtype.na_value)
        return hashed_array

    def take(
        self,
        indexer,
        *,
        allow_fill: bool = False,
        fill_value: Scalar | None = None,
        axis: AxisInt = 0,
    ) -> Self:
        # we always fill with 1 internally
        # to avoid upcasting
        data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value
        result = take(
            self._data,
            indexer,
            fill_value=data_fill_value,
            allow_fill=allow_fill,
            axis=axis,
        )

        mask = take(
            self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis
        )

        # if we are filling
        # we only fill where the indexer is null
        # not existing missing values
        # TODO(jreback) what if we have a non-na float as a fill value?
        if allow_fill and notna(fill_value):
            fill_mask = np.asarray(indexer) == -1
            result[fill_mask] = fill_value
            mask = mask ^ fill_mask

        return self._simple_new(result, mask)
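`take` fills the data positions for `-1` indices with the cheap `_internal_fill_value` and marks them in the mask, then un-masks them again only when the caller supplied a real (non-NA) `fill_value`:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    a = pd.array([10, 20, None], dtype="Int64")
    print(a.take([2, 0]))                                   # [<NA>, 10]
    print(a.take([0, -1], allow_fill=True))                 # [10, <NA>]
    print(a.take([0, -1], allow_fill=True, fill_value=99))  # [10, 99]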

    # error: Return type "BooleanArray" of "isin" incompatible with return type
    # "ndarray" in supertype "ExtensionArray"
    def isin(self, values: ArrayLike) -> BooleanArray:  # type: ignore[override]
        from pandas.core.arrays import BooleanArray

        # algorithms.isin will eventually convert values to an ndarray, so no extra
        # cost to doing it here first
        values_arr = np.asarray(values)
        result = isin(self._data, values_arr)

        if self._hasna:
            values_have_NA = values_arr.dtype == object and any(
                val is self.dtype.na_value for val in values_arr
            )

            # For now, NA does not propagate so set result according to presence of NA,
            # see https://github.com/pandas-dev/pandas/pull/38379 for some discussion
            result[self._mask] = values_have_NA

        mask = np.zeros(self._data.shape, dtype=bool)
        return BooleanArray(result, mask, copy=False)
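Per the branch above, masked positions match only when the `values` object-array literally contains the dtype's NA singleton, and the returned `BooleanArray` always carries an all-False mask:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    a = pd.array([1, 2, None], dtype="Int64")
    print(a.isin([1]))         # [True, False, False]
    print(a.isin([1, pd.NA]))  # [True, False, True]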

    def copy(self) -> Self:
        data = self._data.copy()
        mask = self._mask.copy()
        return self._simple_new(data, mask)

    @doc(ExtensionArray.duplicated)
    def duplicated(
        self, keep: Literal["first", "last", False] = "first"
    ) -> npt.NDArray[np.bool_]:
        values = self._data
        mask = self._mask
        return algos.duplicated(values, keep=keep, mask=mask)

    def unique(self) -> Self:
        """
        Compute the BaseMaskedArray of unique values.

        Returns
        -------
        uniques : BaseMaskedArray
        """
        uniques, mask = algos.unique_with_mask(self._data, self._mask)
        return self._simple_new(uniques, mask)

    @doc(ExtensionArray.searchsorted)
    def searchsorted(
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter | None = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        if self._hasna:
            raise ValueError(
                "searchsorted requires array to be sorted, which is impossible "
                "with NAs present."
            )
        if isinstance(value, ExtensionArray):
            value = value.astype(object)
        # Base class searchsorted would cast to object, which is *much* slower.
        return self._data.searchsorted(value, side=side, sorter=sorter)

    @doc(ExtensionArray.factorize)
    def factorize(
        self,
        use_na_sentinel: bool = True,
    ) -> tuple[np.ndarray, ExtensionArray]:
        arr = self._data
        mask = self._mask

        # Use a sentinel for na; recode and add NA to uniques if necessary below
        codes, uniques = factorize_array(arr, use_na_sentinel=True, mask=mask)

        # check that factorize_array correctly preserves dtype.
        assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype)

        has_na = mask.any()
        if use_na_sentinel or not has_na:
            size = len(uniques)
        else:
            # Make room for an NA value
            size = len(uniques) + 1
        uniques_mask = np.zeros(size, dtype=bool)
        if not use_na_sentinel and has_na:
            na_index = mask.argmax()
            # Insert na with the proper code
            if na_index == 0:
                na_code = np.intp(0)
            else:
                na_code = codes[:na_index].max() + 1
            codes[codes >= na_code] += 1
            codes[codes == -1] = na_code
            # dummy value for uniques; not used since uniques_mask will be True
            uniques = np.insert(uniques, na_code, 0)
            uniques_mask[na_code] = True
        uniques_ea = self._simple_new(uniques, uniques_mask)

        return codes, uniques_ea
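The recoding dance above is easiest to see side by side: with the sentinel, NA stays out of the uniques as code -1; without it, NA is inserted into the uniques at the position where it first appears. A sketch:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    a = pd.array([1, None, 1, 2], dtype="Int64")

    codes, uniques = a.factorize()
    print(codes, uniques)  # [ 0 -1  0  1]  [1, 2]

    codes, uniques = a.factorize(use_na_sentinel=False)
    print(codes, uniques)  # [0 1 0 2]  [1, <NA>, 2]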

    @doc(ExtensionArray._values_for_argsort)
    def _values_for_argsort(self) -> np.ndarray:
        return self._data

    def value_counts(self, dropna: bool = True) -> Series:
        """
        Returns a Series containing counts of each unique value.

        Parameters
        ----------
        dropna : bool, default True
            Don't include counts of missing values.

        Returns
        -------
        counts : Series

        See Also
        --------
        Series.value_counts
        """
        from pandas import (
            Index,
            Series,
        )
        from pandas.arrays import IntegerArray

        keys, value_counts, na_counter = algos.value_counts_arraylike(
            self._data, dropna=dropna, mask=self._mask
        )
        mask_index = np.zeros((len(value_counts),), dtype=np.bool_)
        mask = mask_index.copy()

        if na_counter > 0:
            mask_index[-1] = True

        arr = IntegerArray(value_counts, mask)
        index = Index(
            self.dtype.construct_array_type()(
                keys, mask_index  # type: ignore[arg-type]
            )
        )
        return Series(arr, index=index, name="count", copy=False)
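With `dropna=False` the NA count rides along as the last key, which is why `mask_index[-1]` is set when `na_counter > 0`; the counts themselves always come back as an `Int64` Series named "count":

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    a = pd.array([1, 1, 2, None], dtype="Int64")
    print(a.value_counts())              # 1 -> 2, 2 -> 1 (NA dropped)
    print(a.value_counts(dropna=False))  # ... plus <NA> -> 1 as the last entry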

    def _mode(self, dropna: bool = True) -> Self:
        if dropna:
            result = mode(self._data, dropna=dropna, mask=self._mask)
            res_mask = np.zeros(result.shape, dtype=np.bool_)
        else:
            result, res_mask = mode(self._data, dropna=dropna, mask=self._mask)
        result = type(self)(result, res_mask)  # type: ignore[arg-type]
        return result[result.argsort()]

    @doc(ExtensionArray.equals)
    def equals(self, other) -> bool:
        if type(self) != type(other):
            return False
        if other.dtype != self.dtype:
            return False

        # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT
        # equal.
        if not np.array_equal(self._mask, other._mask):
            return False

        left = self._data[~self._mask]
        right = other._data[~other._mask]
        return array_equivalent(left, right, strict_nan=True, dtype_equal=True)
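Note how strict `equals` is: the types and dtypes must match and the masks must line up position by position before any data is compared, so an `Int64` array is never equal to a `Float64` one even with identical values:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    a = pd.array([1, None], dtype="Int64")
    print(a.equals(pd.array([1, None], dtype="Int64")))    # True
    print(a.equals(pd.array([1, 2], dtype="Int64")))       # False: masks differ
    print(a.equals(pd.array([1, None], dtype="Float64")))  # False: dtype differs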

    def _quantile(
        self, qs: npt.NDArray[np.float64], interpolation: str
    ) -> BaseMaskedArray:
        """
        Dispatch to quantile_with_mask, needed because we do not have
        _from_factorized.

        Notes
        -----
        We assume that all impacted cases are 1D-only.
        """
        res = quantile_with_mask(
            self._data,
            mask=self._mask,
            # TODO(GH#40932): na_value_for_dtype(self.dtype.numpy_dtype)
            # instead of np.nan
            fill_value=np.nan,
            qs=qs,
            interpolation=interpolation,
        )

        if self._hasna:
            # Our result mask is all-False unless we are all-NA, in which
            # case it is all-True.
            if self.ndim == 2:
                # I think this should be out_mask=self.isna().all(axis=1)
                # but am holding off until we have tests
                raise NotImplementedError
            if self.isna().all():
                out_mask = np.ones(res.shape, dtype=bool)

                if is_integer_dtype(self.dtype):
                    # We try to maintain int dtype if possible for not all-na case
                    # as well
                    res = np.zeros(res.shape, dtype=self.dtype.numpy_dtype)
            else:
                out_mask = np.zeros(res.shape, dtype=bool)
        else:
            out_mask = np.zeros(res.shape, dtype=bool)
        return self._maybe_mask_result(res, mask=out_mask)

    # ------------------------------------------------------------------
    # Reductions

    def _reduce(
        self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
    ):
        if name in {"any", "all", "min", "max", "sum", "prod", "mean", "var", "std"}:
            result = getattr(self, name)(skipna=skipna, **kwargs)
        else:
            # median, skew, kurt, sem
            data = self._data
            mask = self._mask
            op = getattr(nanops, f"nan{name}")
            axis = kwargs.pop("axis", None)
            result = op(data, axis=axis, skipna=skipna, mask=mask, **kwargs)

        if keepdims:
            if isna(result):
                return self._wrap_na_result(name=name, axis=0, mask_size=(1,))
            else:
                result = result.reshape(1)
                mask = np.zeros(1, dtype=bool)
                return self._maybe_mask_result(result, mask)

        if isna(result):
            return libmissing.NA
        else:
            return result
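`_reduce` is the funnel for named reductions: the common ones dispatch to the dedicated methods further down, the rest (median, skew, kurt, sem) go straight to the `nanops` kernels with the mask passed along, and an NA outcome surfaces as `pd.NA`. Observable via Series:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    s = pd.Series([1, 2, None], dtype="Int64")
    print(s.sum())              # 3: dispatches to the masked sum below
    print(s.median())           # 1.5: falls through to nanops.nanmedian
    print(s.sum(skipna=False))  # <NA>: an NA result is returned as pd.NA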

    def _wrap_reduction_result(self, name: str, result, *, skipna, axis):
        if isinstance(result, np.ndarray):
            if skipna:
                # we only retain mask for all-NA rows/columns
                mask = self._mask.all(axis=axis)
            else:
                mask = self._mask.any(axis=axis)

            return self._maybe_mask_result(result, mask)
        return result

    def _wrap_na_result(self, *, name, axis, mask_size):
        mask = np.ones(mask_size, dtype=bool)

        float_dtyp = "float32" if self.dtype == "Float32" else "float64"
        if name in ["mean", "median", "var", "std", "skew", "kurt"]:
            np_dtype = float_dtyp
        elif name in ["min", "max"] or self.dtype.itemsize == 8:
            np_dtype = self.dtype.numpy_dtype.name
        else:
            is_windows_or_32bit = is_platform_windows() or not IS64
            int_dtyp = "int32" if is_windows_or_32bit else "int64"
            uint_dtyp = "uint32" if is_windows_or_32bit else "uint64"
            np_dtype = {"b": int_dtyp, "i": int_dtyp, "u": uint_dtyp, "f": float_dtyp}[
                self.dtype.kind
            ]

        value = np.array([1], dtype=np_dtype)
        return self._maybe_mask_result(value, mask=mask)

    def _wrap_min_count_reduction_result(
        self, name: str, result, *, skipna, min_count, axis
    ):
        if min_count == 0 and isinstance(result, np.ndarray):
            return self._maybe_mask_result(result, np.zeros(result.shape, dtype=bool))
        return self._wrap_reduction_result(name, result, skipna=skipna, axis=axis)

    def sum(
        self,
        *,
        skipna: bool = True,
        min_count: int = 0,
        axis: AxisInt | None = 0,
        **kwargs,
    ):
        nv.validate_sum((), kwargs)

        result = masked_reductions.sum(
            self._data,
            self._mask,
            skipna=skipna,
            min_count=min_count,
            axis=axis,
        )
        return self._wrap_min_count_reduction_result(
            "sum", result, skipna=skipna, min_count=min_count, axis=axis
        )

    def prod(
        self,
        *,
        skipna: bool = True,
        min_count: int = 0,
        axis: AxisInt | None = 0,
        **kwargs,
    ):
        nv.validate_prod((), kwargs)

        result = masked_reductions.prod(
            self._data,
            self._mask,
            skipna=skipna,
            min_count=min_count,
            axis=axis,
        )
        return self._wrap_min_count_reduction_result(
            "prod", result, skipna=skipna, min_count=min_count, axis=axis
        )

    def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
        nv.validate_mean((), kwargs)
        result = masked_reductions.mean(
            self._data,
            self._mask,
            skipna=skipna,
            axis=axis,
        )
        return self._wrap_reduction_result("mean", result, skipna=skipna, axis=axis)

    def var(
        self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs
    ):
        nv.validate_stat_ddof_func((), kwargs, fname="var")
        result = masked_reductions.var(
            self._data,
            self._mask,
            skipna=skipna,
            axis=axis,
            ddof=ddof,
        )
        return self._wrap_reduction_result("var", result, skipna=skipna, axis=axis)

    def std(
        self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs
    ):
        nv.validate_stat_ddof_func((), kwargs, fname="std")
        result = masked_reductions.std(
            self._data,
            self._mask,
            skipna=skipna,
            axis=axis,
            ddof=ddof,
        )
        return self._wrap_reduction_result("std", result, skipna=skipna, axis=axis)

    def min(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
        nv.validate_min((), kwargs)
        result = masked_reductions.min(
            self._data,
            self._mask,
            skipna=skipna,
            axis=axis,
        )
        return self._wrap_reduction_result("min", result, skipna=skipna, axis=axis)

    def max(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
        nv.validate_max((), kwargs)
        result = masked_reductions.max(
            self._data,
            self._mask,
            skipna=skipna,
            axis=axis,
        )
        return self._wrap_reduction_result("max", result, skipna=skipna, axis=axis)

    def map(self, mapper, na_action=None):
        return map_array(self.to_numpy(), mapper, na_action=na_action)

    def any(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
        """
        Return whether any element is truthy.

        Returns False unless there is at least one element that is truthy.
        By default, NAs are skipped. If ``skipna=False`` is specified and
        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
        is used as for logical operations.

        .. versionchanged:: 1.4.0

        Parameters
        ----------
        skipna : bool, default True
            Exclude NA values. If the entire array is NA and `skipna` is
            True, then the result will be False, as for an empty array.
            If `skipna` is False, the result will still be True if there is
            at least one element that is truthy, otherwise NA will be returned
            if there are NA's present.
        axis : int, optional, default 0
        **kwargs : any, default None
            Additional keywords have no effect but might be accepted for
            compatibility with NumPy.

        Returns
        -------
        bool or :attr:`pandas.NA`

        See Also
        --------
        numpy.any : Numpy version of this method.
        BaseMaskedArray.all : Return whether all elements are truthy.

        Examples
        --------
        The result indicates whether any element is truthy (and by default
        skips NAs):

        >>> pd.array([True, False, True]).any()
        True
        >>> pd.array([True, False, pd.NA]).any()
        True
        >>> pd.array([False, False, pd.NA]).any()
        False
        >>> pd.array([], dtype="boolean").any()
        False
        >>> pd.array([pd.NA], dtype="boolean").any()
        False
        >>> pd.array([pd.NA], dtype="Float64").any()
        False

        With ``skipna=False``, the result can be NA if this is logically
        required (whether ``pd.NA`` is True or False influences the result):

        >>> pd.array([True, False, pd.NA]).any(skipna=False)
        True
        >>> pd.array([1, 0, pd.NA]).any(skipna=False)
        True
        >>> pd.array([False, False, pd.NA]).any(skipna=False)
        <NA>
        >>> pd.array([0, 0, pd.NA]).any(skipna=False)
        <NA>
        """
        nv.validate_any((), kwargs)

        values = self._data.copy()
        # error: Argument 3 to "putmask" has incompatible type "object";
        # expected "Union[_SupportsArray[dtype[Any]],
        # _NestedSequence[_SupportsArray[dtype[Any]]],
        # bool, int, float, complex, str, bytes,
        # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
        np.putmask(values, self._mask, self._falsey_value)  # type: ignore[arg-type]
        result = values.any()
        if skipna:
            return result
        else:
            if result or len(self) == 0 or not self._mask.any():
                return result
            else:
                return self.dtype.na_value

    def all(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
        """
        Return whether all elements are truthy.

        Returns True unless there is at least one element that is falsey.
        By default, NAs are skipped. If ``skipna=False`` is specified and
        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
        is used as for logical operations.

        .. versionchanged:: 1.4.0

        Parameters
        ----------
        skipna : bool, default True
            Exclude NA values. If the entire array is NA and `skipna` is
            True, then the result will be True, as for an empty array.
            If `skipna` is False, the result will still be False if there is
            at least one element that is falsey, otherwise NA will be returned
            if there are NA's present.
        axis : int, optional, default 0
        **kwargs : any, default None
            Additional keywords have no effect but might be accepted for
            compatibility with NumPy.

        Returns
        -------
        bool or :attr:`pandas.NA`

        See Also
        --------
        numpy.all : Numpy version of this method.
        BooleanArray.any : Return whether any element is truthy.

        Examples
        --------
        The result indicates whether all elements are truthy (and by default
        skips NAs):

        >>> pd.array([True, True, pd.NA]).all()
        True
        >>> pd.array([1, 1, pd.NA]).all()
        True
        >>> pd.array([True, False, pd.NA]).all()
        False
        >>> pd.array([], dtype="boolean").all()
        True
        >>> pd.array([pd.NA], dtype="boolean").all()
        True
        >>> pd.array([pd.NA], dtype="Float64").all()
        True

        With ``skipna=False``, the result can be NA if this is logically
        required (whether ``pd.NA`` is True or False influences the result):

        >>> pd.array([True, True, pd.NA]).all(skipna=False)
        <NA>
        >>> pd.array([1, 1, pd.NA]).all(skipna=False)
        <NA>
        >>> pd.array([True, False, pd.NA]).all(skipna=False)
        False
        >>> pd.array([1, 0, pd.NA]).all(skipna=False)
        False
        """
        nv.validate_all((), kwargs)

        values = self._data.copy()
        # error: Argument 3 to "putmask" has incompatible type "object";
        # expected "Union[_SupportsArray[dtype[Any]],
        # _NestedSequence[_SupportsArray[dtype[Any]]],
        # bool, int, float, complex, str, bytes,
        # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
        np.putmask(values, self._mask, self._truthy_value)  # type: ignore[arg-type]
        result = values.all(axis=axis)

        if skipna:
            return result
        else:
            if not result or len(self) == 0 or not self._mask.any():
                return result
            else:
                return self.dtype.na_value

    def interpolate(
        self,
        *,
        method: InterpolateOptions,
        axis: int,
        index,
        limit,
        limit_direction,
        limit_area,
        copy: bool,
        **kwargs,
    ) -> FloatingArray:
        """
        See NDFrame.interpolate.__doc__.
        """
        # NB: we return type(self) even if copy=False
        if self.dtype.kind == "f":
            if copy:
                data = self._data.copy()
                mask = self._mask.copy()
            else:
                data = self._data
                mask = self._mask
        elif self.dtype.kind in "iu":
            copy = True
            data = self._data.astype("f8")
            mask = self._mask.copy()
        else:
            raise NotImplementedError(
                f"interpolate is not implemented for dtype={self.dtype}"
            )

        missing.interpolate_2d_inplace(
            data,
            method=method,
            axis=0,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            mask=mask,
            **kwargs,
        )
        if not copy:
            return self  # type: ignore[return-value]
        if self.dtype.kind == "f":
            return type(self)._simple_new(data, mask)  # type: ignore[return-value]
        else:
            from pandas.core.arrays import FloatingArray

            return FloatingArray._simple_new(data, mask)
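Because integer inputs are upcast to `f8` before the in-place interpolation, interpolating a nullable integer array always produces `Float64`; float inputs keep their dtype. A sketch (interpolation on masked dtypes requires a pandas recent enough to ship this method, as the vendored copy here does):

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    s = pd.Series([1, None, 3], dtype="Int64")
    out = s.interpolate()
    print(out.dtype)     # Float64: int data was cast to f8 first
    print(out.tolist())  # [1.0, 2.0, 3.0]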

    def _accumulate(
        self, name: str, *, skipna: bool = True, **kwargs
    ) -> BaseMaskedArray:
        data = self._data
        mask = self._mask

        op = getattr(masked_accumulations, name)
        data, mask = op(data, mask, skipna=skipna, **kwargs)

        return self._simple_new(data, mask)

    # ------------------------------------------------------------------
    # GroupBy Methods

    def _groupby_op(
        self,
        *,
        how: str,
        has_dropped_na: bool,
        min_count: int,
        ngroups: int,
        ids: npt.NDArray[np.intp],
        **kwargs,
    ):
        from pandas.core.groupby.ops import WrappedCythonOp

        kind = WrappedCythonOp.get_kind_from_how(how)
        op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)

        # libgroupby functions are responsible for NOT altering mask
        mask = self._mask
        if op.kind != "aggregate":
            result_mask = mask.copy()
        else:
            result_mask = np.zeros(ngroups, dtype=bool)

        if how == "rank" and kwargs.get("na_option") in ["top", "bottom"]:
            result_mask[:] = False

        res_values = op._cython_op_ndim_compat(
            self._data,
            min_count=min_count,
            ngroups=ngroups,
            comp_ids=ids,
            mask=mask,
            result_mask=result_mask,
            **kwargs,
        )

        if op.how == "ohlc":
            arity = op._cython_arity.get(op.how, 1)
            result_mask = np.tile(result_mask, (arity, 1)).T

        if op.how in ["idxmin", "idxmax"]:
            # Result values are indexes to take, keep as ndarray
            return res_values
        else:
            # res_values should already have the correct dtype, we just need to
            # wrap in a MaskedArray
            return self._maybe_mask_result(res_values, result_mask)


def transpose_homogeneous_masked_arrays(
    masked_arrays: Sequence[BaseMaskedArray],
) -> list[BaseMaskedArray]:
    """Transpose masked arrays in a list, but faster.

    Input should be a list of 1-dim masked arrays of equal length and all have the
    same dtype. The caller is responsible for ensuring validity of input data.
    """
    masked_arrays = list(masked_arrays)
    dtype = masked_arrays[0].dtype

    values = [arr._data.reshape(1, -1) for arr in masked_arrays]
    transposed_values = np.concatenate(
        values,
        axis=0,
        out=np.empty(
            (len(masked_arrays), len(masked_arrays[0])),
            order="F",
            dtype=dtype.numpy_dtype,
        ),
    )

    masks = [arr._mask.reshape(1, -1) for arr in masked_arrays]
    transposed_masks = np.concatenate(
        masks, axis=0, out=np.empty_like(transposed_values, dtype=bool)
    )

    arr_type = dtype.construct_array_type()
    transposed_arrays: list[BaseMaskedArray] = []
    for i in range(transposed_values.shape[1]):
        transposed_arr = arr_type(transposed_values[:, i], mask=transposed_masks[:, i])
        transposed_arrays.append(transposed_arr)

    return transposed_arrays
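`transpose_homogeneous_masked_arrays` is an internal helper (pandas uses it when transposing a DataFrame whose columns all share one masked dtype), so the import path below is an implementation detail that may move between versions; it is shown here only to make the row/column flip concrete:

    # --- illustrative sketch, internal API, not part of the vendored file ---
    import pandas as pd
    from pandas.core.arrays.masked import transpose_homogeneous_masked_arrays

    rows = [
        pd.array([1, 2, None], dtype="Int64"),
        pd.array([4, None, 6], dtype="Int64"),
    ]
    cols = transpose_homogeneous_masked_arrays(rows)
    print(cols[0])  # [1, 4]
    print(cols[1])  # [2, <NA>]
    print(cols[2])  # [<NA>, 6]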
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/numeric.py
ADDED
@@ -0,0 +1,286 @@
from __future__ import annotations

import numbers
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
)

import numpy as np

from pandas._libs import (
    lib,
    missing as libmissing,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly

from pandas.core.dtypes.common import (
    is_integer_dtype,
    is_string_dtype,
    pandas_dtype,
)

from pandas.core.arrays.masked import (
    BaseMaskedArray,
    BaseMaskedDtype,
)

if TYPE_CHECKING:
    from collections.abc import Mapping

    import pyarrow

    from pandas._typing import (
        Dtype,
        DtypeObj,
        Self,
        npt,
    )


class NumericDtype(BaseMaskedDtype):
    _default_np_dtype: np.dtype
    _checker: Callable[[Any], bool]  # is_foo_dtype

    def __repr__(self) -> str:
        return f"{self.name}Dtype()"

    @cache_readonly
    def is_signed_integer(self) -> bool:
        return self.kind == "i"

    @cache_readonly
    def is_unsigned_integer(self) -> bool:
        return self.kind == "u"

    @property
    def _is_numeric(self) -> bool:
        return True

    def __from_arrow__(
        self, array: pyarrow.Array | pyarrow.ChunkedArray
    ) -> BaseMaskedArray:
        """
        Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        from pandas.core.arrays.arrow._arrow_utils import (
            pyarrow_array_to_numpy_and_mask,
        )

        array_class = self.construct_array_type()

        pyarrow_type = pyarrow.from_numpy_dtype(self.type)
        if not array.type.equals(pyarrow_type) and not pyarrow.types.is_null(
            array.type
        ):
            # test_from_arrow_type_error raise for string, but allow
            # through itemsize conversion GH#31896
            rt_dtype = pandas_dtype(array.type.to_pandas_dtype())
            if rt_dtype.kind not in "iuf":
                # Could allow "c" or potentially disallow float<->int conversion,
                # but at the moment we specifically test that uint<->int works
                raise TypeError(
                    f"Expected array of {self} type, got {array.type} instead"
                )

            array = array.cast(pyarrow_type)

        if isinstance(array, pyarrow.ChunkedArray):
            # TODO this "if" can be removed when requiring pyarrow >= 10.0, which fixed
            # combine_chunks for empty arrays https://github.com/apache/arrow/pull/13757
            if array.num_chunks == 0:
                array = pyarrow.array([], type=array.type)
            else:
                array = array.combine_chunks()

        data, mask = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype)
        return array_class(data.copy(), ~mask, copy=False)

    @classmethod
    def _get_dtype_mapping(cls) -> Mapping[np.dtype, NumericDtype]:
        raise AbstractMethodError(cls)

    @classmethod
    def _standardize_dtype(cls, dtype: NumericDtype | str | np.dtype) -> NumericDtype:
        """
        Convert a string representation or a numpy dtype to NumericDtype.
        """
        if isinstance(dtype, str) and (dtype.startswith(("Int", "UInt", "Float"))):
            # Avoid DeprecationWarning from NumPy about np.dtype("Int64")
            # https://github.com/numpy/numpy/pull/7476
            dtype = dtype.lower()

        if not isinstance(dtype, NumericDtype):
            mapping = cls._get_dtype_mapping()
            try:
                dtype = mapping[np.dtype(dtype)]
            except KeyError as err:
                raise ValueError(f"invalid dtype specified {dtype}") from err
        return dtype
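`_standardize_dtype` is why the capitalized string aliases resolve to the nullable dtypes while plain NumPy dtype strings bypass them entirely; the lowercasing step only exists to dodge NumPy's `np.dtype("Int64")` deprecation before the mapping lookup. For instance:

    # --- illustrative sketch, not part of the vendored file ---
    import pandas as pd

    print(pd.array([1, 2], dtype="Int64").dtype)           # Int64 (nullable)
    print(pd.array([1.0, 2.0], dtype="Float32").dtype)     # Float32 (nullable)
    print(type(pd.array([1, 2], dtype="int64")).__name__)  # NumpyExtensionArray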
    @classmethod
    def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
        """
        Safely cast the values to the given dtype.

        "safe" in this context means the casting is lossless.
        """
        raise AbstractMethodError(cls)


def _coerce_to_data_and_mask(
    values, dtype, copy: bool, dtype_cls: type[NumericDtype], default_dtype: np.dtype
):
    checker = dtype_cls._checker

    mask = None
    inferred_type = None

    if dtype is None and hasattr(values, "dtype"):
        if checker(values.dtype):
            dtype = values.dtype

    if dtype is not None:
        dtype = dtype_cls._standardize_dtype(dtype)

    cls = dtype_cls.construct_array_type()
    if isinstance(values, cls):
        values, mask = values._data, values._mask
        if dtype is not None:
            values = values.astype(dtype.numpy_dtype, copy=False)

        if copy:
            values = values.copy()
            mask = mask.copy()
        return values, mask, dtype, inferred_type

    original = values
    if not copy:
        values = np.asarray(values)
    else:
        values = np.array(values, copy=copy)
    inferred_type = None
    if values.dtype == object or is_string_dtype(values.dtype):
        inferred_type = lib.infer_dtype(values, skipna=True)
        if inferred_type == "boolean" and dtype is None:
            name = dtype_cls.__name__.strip("_")
            raise TypeError(f"{values.dtype} cannot be converted to {name}")

    elif values.dtype.kind == "b" and checker(dtype):
        if not copy:
            values = np.asarray(values, dtype=default_dtype)
        else:
            values = np.array(values, dtype=default_dtype, copy=copy)

    elif values.dtype.kind not in "iuf":
        name = dtype_cls.__name__.strip("_")
        raise TypeError(f"{values.dtype} cannot be converted to {name}")

    if values.ndim != 1:
        raise TypeError("values must be a 1D list-like")

    if mask is None:
        if values.dtype.kind in "iu":
            # fastpath
            mask = np.zeros(len(values), dtype=np.bool_)
        else:
            mask = libmissing.is_numeric_na(values)
    else:
        assert len(mask) == len(values)

    if mask.ndim != 1:
        raise TypeError("mask must be a 1D list-like")

    # infer dtype if needed
    if dtype is None:
        dtype = default_dtype
    else:
        dtype = dtype.numpy_dtype

    if is_integer_dtype(dtype) and values.dtype.kind == "f" and len(values) > 0:
        if mask.all():
            values = np.ones(values.shape, dtype=dtype)
        else:
            idx = np.nanargmax(values)
            if int(values[idx]) != original[idx]:
                # We have ints that lost precision during the cast.
                inferred_type = lib.infer_dtype(original, skipna=True)
                if (
                    inferred_type not in ["floating", "mixed-integer-float"]
                    and not mask.any()
                ):
                    values = np.asarray(original, dtype=dtype)
                else:
                    values = np.asarray(original, dtype="object")

    # we copy as need to coerce here
    if mask.any():
        values = values.copy()
        values[mask] = cls._internal_fill_value
    if inferred_type in ("string", "unicode"):
        # casts from str are always safe since they raise
        # a ValueError if the str cannot be parsed into a float
        values = values.astype(dtype, copy=copy)
    else:
        values = dtype_cls._safe_cast(values, dtype, copy=False)

    return values, mask, dtype, inferred_type
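The net effect of `_coerce_to_data_and_mask`: NaN/None become mask entries, float data is accepted for an integer dtype only when the cast is lossless, and strings are parsed (the parse itself raising if a value is not numeric). A sketch of the visible behavior; the exact exception type and message are version-dependent, so this is hedged to catch both:

    # --- illustrative sketch, not part of the vendored file ---
    import numpy as np
    import pandas as pd

    print(pd.array([1.0, 2.0, np.nan], dtype="Int64"))  # [1, 2, <NA>]
    print(pd.array(["1", "2"], dtype="Int64"))          # [1, 2]: parsed from str
    try:
        pd.array([1.5], dtype="Int64")                  # lossy cast is rejected
    except (TypeError, ValueError) as err:
        print(err)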
class NumericArray(BaseMaskedArray):
    """
    Base class for IntegerArray and FloatingArray.
    """

    _dtype_cls: type[NumericDtype]

    def __init__(
        self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
    ) -> None:
        checker = self._dtype_cls._checker
        if not (isinstance(values, np.ndarray) and checker(values.dtype)):
            descr = (
                "floating"
                if self._dtype_cls.kind == "f"  # type: ignore[comparison-overlap]
                else "integer"
            )
            raise TypeError(
                f"values should be {descr} numpy array. Use "
                "the 'pd.array' function instead"
            )
        if values.dtype == np.float16:
            # If we don't raise here, then accessing self.dtype would raise
            raise TypeError("FloatingArray does not support np.float16 dtype.")

        super().__init__(values, mask, copy=copy)

    @cache_readonly
    def dtype(self) -> NumericDtype:
        mapping = self._dtype_cls._get_dtype_mapping()
        return mapping[self._data.dtype]

    @classmethod
    def _coerce_to_array(
        cls, value, *, dtype: DtypeObj, copy: bool = False
    ) -> tuple[np.ndarray, np.ndarray]:
        dtype_cls = cls._dtype_cls
        default_dtype = dtype_cls._default_np_dtype
        values, mask, _, _ = _coerce_to_data_and_mask(
            value, dtype, copy, dtype_cls, default_dtype
        )
        return values, mask

    @classmethod
    def _from_sequence_of_strings(
        cls, strings, *, dtype: Dtype | None = None, copy: bool = False
    ) -> Self:
        from pandas.core.tools.numeric import to_numeric

        scalars = to_numeric(strings, errors="raise", dtype_backend="numpy_nullable")
        return cls._from_sequence(scalars, dtype=dtype, copy=copy)

    _HANDLED_TYPES = (np.ndarray, numbers.Number)
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py
ADDED
@@ -0,0 +1,563 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Literal,
)

import numpy as np

from pandas._libs import lib
from pandas._libs.tslibs import is_supported_dtype
from pandas.compat.numpy import function as nv

from pandas.core.dtypes.astype import astype_array
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import pandas_dtype
from pandas.core.dtypes.dtypes import NumpyEADtype
from pandas.core.dtypes.missing import isna

from pandas.core import (
    arraylike,
    missing,
    nanops,
    ops,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.strings.object_array import ObjectStringArrayMixin

if TYPE_CHECKING:
    from pandas._typing import (
        AxisInt,
        Dtype,
        FillnaOptions,
        InterpolateOptions,
        NpDtype,
        Scalar,
        Self,
        npt,
    )

    from pandas import Index


# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
# incompatible with definition in base class "ExtensionArray"
class NumpyExtensionArray(  # type: ignore[misc]
    OpsMixin,
    NDArrayBackedExtensionArray,
    ObjectStringArrayMixin,
):
    """
    A pandas ExtensionArray for NumPy data.

    This is mostly for internal compatibility, and is not especially
    useful on its own.

    Parameters
    ----------
    values : ndarray
        The NumPy ndarray to wrap. Must be 1-dimensional.
    copy : bool, default False
        Whether to copy `values`.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> pd.arrays.NumpyExtensionArray(np.array([0, 1, 2, 3]))
    <NumpyExtensionArray>
    [0, 1, 2, 3]
    Length: 4, dtype: int64
    """

    # If you're wondering why pd.Series(cls) doesn't put the array in an
    # ExtensionBlock, search for `ABCNumpyExtensionArray`. We check for
    # that _typ to ensure that users don't unnecessarily use EAs inside
    # pandas internals, which turns off things like block consolidation.
    _typ = "npy_extension"
    __array_priority__ = 1000
    _ndarray: np.ndarray
    _dtype: NumpyEADtype
    _internal_fill_value = np.nan

    # ------------------------------------------------------------------------
    # Constructors

    def __init__(
        self, values: np.ndarray | NumpyExtensionArray, copy: bool = False
    ) -> None:
        if isinstance(values, type(self)):
            values = values._ndarray
        if not isinstance(values, np.ndarray):
            raise ValueError(
                f"'values' must be a NumPy array, not {type(values).__name__}"
            )

        if values.ndim == 0:
            # Technically we support 2, but do not advertise that fact.
            raise ValueError("NumpyExtensionArray must be 1-dimensional.")

        if copy:
            values = values.copy()

        dtype = NumpyEADtype(values.dtype)
        super().__init__(values, dtype)

    @classmethod
    def _from_sequence(
        cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
    ) -> NumpyExtensionArray:
        if isinstance(dtype, NumpyEADtype):
            dtype = dtype._dtype

        # error: Argument "dtype" to "asarray" has incompatible type
        # "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object],
        # None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
        # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
        # _DTypeDict, Tuple[Any, Any]]]"
        result = np.asarray(scalars, dtype=dtype)  # type: ignore[arg-type]
        if (
            result.ndim > 1
            and not hasattr(scalars, "dtype")
            and (dtype is None or dtype == object)
        ):
            # e.g. list-of-tuples
            result = construct_1d_object_array_from_listlike(scalars)

        if copy and result is scalars:
            result = result.copy()
        return cls(result)

    def _from_backing_data(self, arr: np.ndarray) -> NumpyExtensionArray:
        return type(self)(arr)

    # ------------------------------------------------------------------------
    # Data

    @property
    def dtype(self) -> NumpyEADtype:
        return self._dtype

    # ------------------------------------------------------------------------
    # NumPy Array Interface

    def __array__(
        self, dtype: NpDtype | None = None, copy: bool | None = None
    ) -> np.ndarray:
        return np.asarray(self._ndarray, dtype=dtype)

    def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
        # Lightly modified version of
        # https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html
        # The primary modification is not boxing scalar return values
        # in NumpyExtensionArray, since pandas' ExtensionArrays are 1-d.
        out = kwargs.get("out", ())

        result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is not NotImplemented:
            return result

        if "out" in kwargs:
            # e.g. test_ufunc_unary
            return arraylike.dispatch_ufunc_with_out(
                self, ufunc, method, *inputs, **kwargs
            )

        if method == "reduce":
            result = arraylike.dispatch_reduction_ufunc(
                self, ufunc, method, *inputs, **kwargs
            )
            if result is not NotImplemented:
                # e.g. tests.series.test_ufunc.TestNumpyReductions
                return result

        # Defer to the implementation of the ufunc on unwrapped values.
        inputs = tuple(
            x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in inputs
        )
        if out:
            kwargs["out"] = tuple(
                x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in out
            )
        result = getattr(ufunc, method)(*inputs, **kwargs)

        if ufunc.nout > 1:
            # multiple return values; re-box array-like results
            return tuple(type(self)(x) for x in result)
        elif method == "at":
            # no return value
            return None
        elif method == "reduce":
            if isinstance(result, np.ndarray):
                # e.g. test_np_reduce_2d
                return type(self)(result)

            # e.g. test_np_max_nested_tuples
            return result
        else:
            # one return value; re-box array-like results
            return type(self)(result)
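The ufunc protocol above unwraps every `NumpyExtensionArray` input to its backing ndarray, runs the ufunc, and re-boxes array results while letting scalars (including dispatched reductions) pass through unwrapped; multi-output ufuncs are re-boxed element-wise. A sketch:

    # --- illustrative sketch, not part of the vendored file ---
    import numpy as np
    import pandas as pd

    arr = pd.arrays.NumpyExtensionArray(np.array([1.0, 4.0, 9.0]))
    print(np.sqrt(arr))         # re-boxed NumpyExtensionArray [1.0, 2.0, 3.0]
    print(np.add.reduce(arr))   # 14.0: reductions come back as plain scalars
    frac, whole = np.modf(arr)  # nout > 1: each output re-boxed
    print(type(frac).__name__)  # NumpyExtensionArray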
212 |
+
# ------------------------------------------------------------------------
|
213 |
+
# Pandas ExtensionArray Interface
|
214 |
+
|
215 |
+
def astype(self, dtype, copy: bool = True):
|
216 |
+
dtype = pandas_dtype(dtype)
|
217 |
+
|
218 |
+
if dtype == self.dtype:
|
219 |
+
if copy:
|
220 |
+
return self.copy()
|
221 |
+
return self
|
222 |
+
|
223 |
+
result = astype_array(self._ndarray, dtype=dtype, copy=copy)
|
224 |
+
return result
|
225 |
+
|
226 |
+
def isna(self) -> np.ndarray:
|
227 |
+
return isna(self._ndarray)
|
228 |
+
|
229 |
+
def _validate_scalar(self, fill_value):
|
230 |
+
if fill_value is None:
|
231 |
+
# Primarily for subclasses
|
232 |
+
fill_value = self.dtype.na_value
|
233 |
+
return fill_value
|
234 |
+
|
235 |
+
def _values_for_factorize(self) -> tuple[np.ndarray, float | None]:
|
236 |
+
if self.dtype.kind in "iub":
|
237 |
+
fv = None
|
238 |
+
else:
|
239 |
+
fv = np.nan
|
240 |
+
return self._ndarray, fv
|
241 |
+
|
242 |
+
# Base EA class (and all other EA classes) don't have limit_area keyword
|
243 |
+
# This can be removed here as well when the interpolate ffill/bfill method
|
244 |
+
# deprecation is enforced
|
245 |
+
def _pad_or_backfill(
|
246 |
+
self,
|
247 |
+
*,
|
248 |
+
method: FillnaOptions,
|
249 |
+
limit: int | None = None,
|
250 |
+
limit_area: Literal["inside", "outside"] | None = None,
|
251 |
+
copy: bool = True,
|
252 |
+
) -> Self:
|
253 |
+
"""
|
254 |
+
ffill or bfill along axis=0.
|
255 |
+
"""
|
256 |
+
if copy:
|
257 |
+
out_data = self._ndarray.copy()
|
258 |
+
else:
|
259 |
+
out_data = self._ndarray
|
260 |
+
|
261 |
+
meth = missing.clean_fill_method(method)
|
262 |
+
missing.pad_or_backfill_inplace(
|
263 |
+
out_data.T,
|
264 |
+
method=meth,
|
265 |
+
axis=0,
|
266 |
+
limit=limit,
|
267 |
+
limit_area=limit_area,
|
268 |
+
)
|
269 |
+
|
270 |
+
if not copy:
|
271 |
+
return self
|
272 |
+
return type(self)._simple_new(out_data, dtype=self.dtype)
|
273 |
+
|
274 |
+
def interpolate(
|
275 |
+
self,
|
276 |
+
*,
|
277 |
+
method: InterpolateOptions,
|
278 |
+
axis: int,
|
279 |
+
index: Index,
|
280 |
+
limit,
|
281 |
+
limit_direction,
|
282 |
+
limit_area,
|
283 |
+
copy: bool,
|
284 |
+
**kwargs,
|
285 |
+
) -> Self:
|
286 |
+
"""
|
287 |
+
See NDFrame.interpolate.__doc__.
|
288 |
+
"""
|
289 |
+
# NB: we return type(self) even if copy=False
|
290 |
+
if not copy:
|
291 |
+
out_data = self._ndarray
|
292 |
+
else:
|
293 |
+
out_data = self._ndarray.copy()
|
294 |
+
|
295 |
+
# TODO: assert we have floating dtype?
|
296 |
+
missing.interpolate_2d_inplace(
|
297 |
+
out_data,
|
298 |
+
method=method,
|
299 |
+
axis=axis,
|
300 |
+
index=index,
|
301 |
+
limit=limit,
|
302 |
+
limit_direction=limit_direction,
|
303 |
+
limit_area=limit_area,
|
304 |
+
**kwargs,
|
305 |
+
)
|
306 |
+
if not copy:
|
307 |
+
return self
|
308 |
+
return type(self)._simple_new(out_data, dtype=self.dtype)
|
309 |
+
|
310 |
+
    # ------------------------------------------------------------------------
    # Reductions

    def any(
        self,
        *,
        axis: AxisInt | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_any((), {"out": out, "keepdims": keepdims})
        result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def all(
        self,
        *,
        axis: AxisInt | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_all((), {"out": out, "keepdims": keepdims})
        result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def min(
        self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs
    ) -> Scalar:
        nv.validate_min((), kwargs)
        result = nanops.nanmin(
            values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
        )
        return self._wrap_reduction_result(axis, result)

    def max(
        self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs
    ) -> Scalar:
        nv.validate_max((), kwargs)
        result = nanops.nanmax(
            values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
        )
        return self._wrap_reduction_result(axis, result)

    def sum(
        self,
        *,
        axis: AxisInt | None = None,
        skipna: bool = True,
        min_count: int = 0,
        **kwargs,
    ) -> Scalar:
        nv.validate_sum((), kwargs)
        result = nanops.nansum(
            self._ndarray, axis=axis, skipna=skipna, min_count=min_count
        )
        return self._wrap_reduction_result(axis, result)

    def prod(
        self,
        *,
        axis: AxisInt | None = None,
        skipna: bool = True,
        min_count: int = 0,
        **kwargs,
    ) -> Scalar:
        nv.validate_prod((), kwargs)
        result = nanops.nanprod(
            self._ndarray, axis=axis, skipna=skipna, min_count=min_count
        )
        return self._wrap_reduction_result(axis, result)

    def mean(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims})
        result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def median(
        self,
        *,
        axis: AxisInt | None = None,
        out=None,
        overwrite_input: bool = False,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_median(
            (), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims}
        )
        result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def std(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
        )
        result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
        return self._wrap_reduction_result(axis, result)

    def var(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var"
        )
        result = nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
        return self._wrap_reduction_result(axis, result)

    def sem(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem"
        )
        result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
        return self._wrap_reduction_result(axis, result)

    def kurt(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt"
        )
        result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def skew(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew"
        )
        result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

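    # Each reduction above validates numpy-compat keywords via nv.* and then
    # defers to the matching nanops function. A sketch of the skipna /
    # min_count behavior (illustrative values):
    #
    #   >>> import pandas as pd
    #   >>> arr = pd.array([1.0, float("nan"), 3.0], dtype="float64")
    #   >>> arr.sum(), arr.sum(skipna=False), arr.sum(min_count=3)
    #   (4.0, nan, nan)
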
    # ------------------------------------------------------------------------
    # Additional Methods

    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        mask = self.isna()
        if na_value is not lib.no_default and mask.any():
            result = self._ndarray.copy()
            result[mask] = na_value
        else:
            result = self._ndarray

        result = np.asarray(result, dtype=dtype)

        if copy and result is self._ndarray:
            result = result.copy()

        return result

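    # In to_numpy above, the data is copied only when a na_value fill is
    # needed, when the dtype cast requires it, or when copy=True. A sketch
    # (illustrative values):
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> arr = pd.array([1.0, np.nan, 3.0], dtype="float64")
    #   >>> arr.to_numpy(na_value=0.0)
    #   array([1., 0., 3.])
    #   >>> arr.to_numpy(dtype="int64", na_value=-1)
    #   array([ 1, -1,  3])
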
    # ------------------------------------------------------------------------
    # Ops

    def __invert__(self) -> NumpyExtensionArray:
        return type(self)(~self._ndarray)

    def __neg__(self) -> NumpyExtensionArray:
        return type(self)(-self._ndarray)

    def __pos__(self) -> NumpyExtensionArray:
        return type(self)(+self._ndarray)

    def __abs__(self) -> NumpyExtensionArray:
        return type(self)(abs(self._ndarray))

    def _cmp_method(self, other, op):
        if isinstance(other, NumpyExtensionArray):
            other = other._ndarray

        other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
        pd_op = ops.get_array_op(op)
        other = ensure_wrapped_if_datetimelike(other)
        result = pd_op(self._ndarray, other)

        if op is divmod or op is ops.rdivmod:
            a, b = result
            if isinstance(a, np.ndarray):
                # for e.g. op vs TimedeltaArray, we may already
                # have an ExtensionArray, in which case we do not wrap
                return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b)
            return a, b

        if isinstance(result, np.ndarray):
            # for e.g. multiplication vs TimedeltaArray, we may already
            # have an ExtensionArray, in which case we do not wrap
            return self._wrap_ndarray_result(result)
        return result

    _arith_method = _cmp_method

    def _wrap_ndarray_result(self, result: np.ndarray):
        # If we have timedelta64[ns] result, return a TimedeltaArray instead
        # of a NumpyExtensionArray
        if result.dtype.kind == "m" and is_supported_dtype(result.dtype):
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._simple_new(result, dtype=result.dtype)
        return type(self)(result)

    # ------------------------------------------------------------------------
    # String methods interface
    _str_na_value = np.nan
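# Since _arith_method is aliased to _cmp_method, comparisons and arithmetic
# share one dispatch path, and _wrap_ndarray_result upgrades timedelta64
# results. A sketch; the TimedeltaArray upgrade is inferred from the code
# above, so treat the exact result type as an assumption:
#
#   >>> import numpy as np, pandas as pd
#   >>> arr = pd.array([1, 2, 3], dtype="int64")   # NumpyExtensionArray
#   >>> type(arr + 1).__name__
#   'NumpyExtensionArray'
#   >>> type(arr * np.timedelta64(1, "s")).__name__
#   'TimedeltaArray'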
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/period.py
ADDED
@@ -0,0 +1,1313 @@
from __future__ import annotations

from datetime import timedelta
import operator
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    TypeVar,
    cast,
    overload,
)
import warnings

import numpy as np

from pandas._libs import (
    algos as libalgos,
    lib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
    BaseOffset,
    NaT,
    NaTType,
    Timedelta,
    add_overflowsafe,
    astype_overflowsafe,
    dt64arr_to_periodarr as c_dt64arr_to_periodarr,
    get_unit_from_dtype,
    iNaT,
    parsing,
    period as libperiod,
    to_offset,
)
from pandas._libs.tslibs.dtypes import (
    FreqGroup,
    PeriodDtypeBase,
    freq_to_period_freqstr,
)
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.offsets import (
    Tick,
    delta_to_tick,
)
from pandas._libs.tslibs.period import (
    DIFFERENT_FREQ,
    IncompatibleFrequency,
    Period,
    get_period_field_arr,
    period_asfreq_arr,
)
from pandas.util._decorators import (
    cache_readonly,
    doc,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.common import (
    ensure_object,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
    DatetimeTZDtype,
    PeriodDtype,
)
from pandas.core.dtypes.generic import (
    ABCIndex,
    ABCPeriodIndex,
    ABCSeries,
    ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import isna

from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com

if TYPE_CHECKING:
    from collections.abc import Sequence

    from pandas._typing import (
        AnyArrayLike,
        Dtype,
        FillnaOptions,
        NpDtype,
        NumpySorter,
        NumpyValueArrayLike,
        Self,
        npt,
    )

    from pandas.core.arrays import (
        DatetimeArray,
        TimedeltaArray,
    )
    from pandas.core.arrays.base import ExtensionArray


BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset)


_shared_doc_kwargs = {
    "klass": "PeriodArray",
}


def _field_accessor(name: str, docstring: str | None = None):
    def f(self):
        base = self.dtype._dtype_code
        result = get_period_field_arr(name, self.asi8, base)
        return result

    f.__name__ = name
    f.__doc__ = docstring
    return property(f)

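# _field_accessor above is a plain property factory: it closes over the field
# name, performs the cython field lookup, and patches __name__ / __doc__ so
# each property documents itself. The same pattern in a self-contained sketch
# (hypothetical names, not part of this diff):
#
#   def make_accessor(name, docstring=None):
#       def f(self):
#           return self._data[name]   # stand-in for get_period_field_arr
#       f.__name__ = name
#       f.__doc__ = docstring
#       return property(f)
#
#   class Record:
#       def __init__(self, data):
#           self._data = data
#       year = make_accessor("year", "The year field.")
#
#   Record({"year": 2023}).year  # -> 2023
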
# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
# incompatible with definition in base class "ExtensionArray"
class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin):  # type: ignore[misc]
    """
    Pandas ExtensionArray for storing Period data.

    Users should use :func:`~pandas.array` to create new instances.

    Parameters
    ----------
    values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
        The data to store. These should be arrays that can be directly
        converted to ordinals without inference or copy (PeriodArray,
        ndarray[int64]), or a box around such an array (Series[period],
        PeriodIndex).
    dtype : PeriodDtype, optional
        A PeriodDtype instance from which to extract a `freq`. If both
        `freq` and `dtype` are specified, then the frequencies must match.
    freq : str or DateOffset
        The `freq` to use for the array. Mostly applicable when `values`
        is an ndarray of integers, when `freq` is required. When `values`
        is a PeriodArray (or box around), it's checked that ``values.freq``
        matches `freq`.
    copy : bool, default False
        Whether to copy the ordinals before storing.

    Attributes
    ----------
    None

    Methods
    -------
    None

    See Also
    --------
    Period: Represents a period of time.
    PeriodIndex : Immutable Index for period data.
    period_range: Create a fixed-frequency PeriodArray.
    array: Construct a pandas array.

    Notes
    -----
    There are two components to a PeriodArray

    - ordinals : integer ndarray
    - freq : pd.tseries.offsets.Offset

    The values are physically stored as a 1-D ndarray of integers. These are
    called "ordinals" and represent some kind of offset from a base.

    The `freq` indicates the span covered by each element of the array.
    All elements in the PeriodArray have the same `freq`.

    Examples
    --------
    >>> pd.arrays.PeriodArray(pd.PeriodIndex(['2023-01-01',
    ...                                       '2023-01-02'], freq='D'))
    <PeriodArray>
    ['2023-01-01', '2023-01-02']
    Length: 2, dtype: period[D]
    """

    # array priority higher than numpy scalars
    __array_priority__ = 1000
    _typ = "periodarray"  # ABCPeriodArray
    _internal_fill_value = np.int64(iNaT)
    _recognized_scalars = (Period,)
    _is_recognized_dtype = lambda x: isinstance(
        x, PeriodDtype
    )  # check_compatible_with checks freq match
    _infer_matches = ("period",)

    @property
    def _scalar_type(self) -> type[Period]:
        return Period

    # Names others delegate to us
    _other_ops: list[str] = []
    _bool_ops: list[str] = ["is_leap_year"]
    _object_ops: list[str] = ["start_time", "end_time", "freq"]
    _field_ops: list[str] = [
        "year",
        "month",
        "day",
        "hour",
        "minute",
        "second",
        "weekofyear",
        "weekday",
        "week",
        "dayofweek",
        "day_of_week",
        "dayofyear",
        "day_of_year",
        "quarter",
        "qyear",
        "days_in_month",
        "daysinmonth",
    ]
    _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
    _datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"]

    _dtype: PeriodDtype

    # --------------------------------------------------------------------
    # Constructors

    def __init__(
        self, values, dtype: Dtype | None = None, freq=None, copy: bool = False
    ) -> None:
        if freq is not None:
            # GH#52462
            warnings.warn(
                "The 'freq' keyword in the PeriodArray constructor is deprecated "
                "and will be removed in a future version. Pass 'dtype' instead",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            freq = validate_dtype_freq(dtype, freq)
            dtype = PeriodDtype(freq)

        if dtype is not None:
            dtype = pandas_dtype(dtype)
            if not isinstance(dtype, PeriodDtype):
                raise ValueError(f"Invalid dtype {dtype} for PeriodArray")

        if isinstance(values, ABCSeries):
            values = values._values
            if not isinstance(values, type(self)):
                raise TypeError("Incorrect dtype")

        elif isinstance(values, ABCPeriodIndex):
            values = values._values

        if isinstance(values, type(self)):
            if dtype is not None and dtype != values.dtype:
                raise raise_on_incompatible(values, dtype.freq)
            values, dtype = values._ndarray, values.dtype

        if not copy:
            values = np.asarray(values, dtype="int64")
        else:
            values = np.array(values, dtype="int64", copy=copy)
        if dtype is None:
            raise ValueError("dtype is not specified and cannot be inferred")
        dtype = cast(PeriodDtype, dtype)
        NDArrayBacked.__init__(self, values, dtype)

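    # Given the GH#52462 deprecation above, the supported spelling passes a
    # dtype rather than freq. A sketch from raw ordinals (for freq "M" an
    # ordinal counts months since 1970-01; values here are illustrative):
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> ordinals = np.array([636, 637], dtype="int64")  # 2023-01, 2023-02
    #   >>> pd.arrays.PeriodArray(ordinals, dtype=pd.PeriodDtype("M"))[0]
    #   Period('2023-01', 'M')
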
    # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
    @classmethod
    def _simple_new(  # type: ignore[override]
        cls,
        values: npt.NDArray[np.int64],
        dtype: PeriodDtype,
    ) -> Self:
        # alias for PeriodArray.__init__
        assertion_msg = "Should be numpy array of type i8"
        assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
        return cls(values, dtype=dtype)

    @classmethod
    def _from_sequence(
        cls,
        scalars,
        *,
        dtype: Dtype | None = None,
        copy: bool = False,
    ) -> Self:
        if dtype is not None:
            dtype = pandas_dtype(dtype)
        if dtype and isinstance(dtype, PeriodDtype):
            freq = dtype.freq
        else:
            freq = None

        if isinstance(scalars, cls):
            validate_dtype_freq(scalars.dtype, freq)
            if copy:
                scalars = scalars.copy()
            return scalars

        periods = np.asarray(scalars, dtype=object)

        freq = freq or libperiod.extract_freq(periods)
        ordinals = libperiod.extract_ordinals(periods, freq)
        dtype = PeriodDtype(freq)
        return cls(ordinals, dtype=dtype)

    @classmethod
    def _from_sequence_of_strings(
        cls, strings, *, dtype: Dtype | None = None, copy: bool = False
    ) -> Self:
        return cls._from_sequence(strings, dtype=dtype, copy=copy)

    @classmethod
    def _from_datetime64(cls, data, freq, tz=None) -> Self:
        """
        Construct a PeriodArray from a datetime64 array

        Parameters
        ----------
        data : ndarray[datetime64[ns], datetime64[ns, tz]]
        freq : str or Tick
        tz : tzinfo, optional

        Returns
        -------
        PeriodArray[freq]
        """
        if isinstance(freq, BaseOffset):
            freq = freq_to_period_freqstr(freq.n, freq.name)
        data, freq = dt64arr_to_periodarr(data, freq, tz)
        dtype = PeriodDtype(freq)
        return cls(data, dtype=dtype)

    @classmethod
    def _generate_range(cls, start, end, periods, freq):
        periods = dtl.validate_periods(periods)

        if freq is not None:
            freq = Period._maybe_convert_freq(freq)

        if start is not None or end is not None:
            subarr, freq = _get_ordinal_range(start, end, periods, freq)
        else:
            raise ValueError("Not enough parameters to construct Period range")

        return subarr, freq

    @classmethod
    def _from_fields(cls, *, fields: dict, freq) -> Self:
        subarr, freq = _range_from_fields(freq=freq, **fields)
        dtype = PeriodDtype(freq)
        return cls._simple_new(subarr, dtype=dtype)

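    # pd.array with Period scalars routes through _from_sequence above: the
    # freq is extracted from the scalars when no dtype is given, and NaT maps
    # to iNaT. A quick sketch (illustrative values):
    #
    #   >>> import pandas as pd
    #   >>> arr = pd.array([pd.Period("2023-01", freq="M"), pd.NaT])
    #   >>> arr.dtype
    #   period[M]
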
    # -----------------------------------------------------------------
    # DatetimeLike Interface

    # error: Argument 1 of "_unbox_scalar" is incompatible with supertype
    # "DatetimeLikeArrayMixin"; supertype defines the argument type as
    # "Union[Union[Period, Any, Timedelta], NaTType]"
    def _unbox_scalar(  # type: ignore[override]
        self,
        value: Period | NaTType,
    ) -> np.int64:
        if value is NaT:
            # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value"
            return np.int64(value._value)  # type: ignore[union-attr]
        elif isinstance(value, self._scalar_type):
            self._check_compatible_with(value)
            return np.int64(value.ordinal)
        else:
            raise ValueError(f"'value' should be a Period. Got '{value}' instead.")

    def _scalar_from_string(self, value: str) -> Period:
        return Period(value, freq=self.freq)

    # error: Argument 1 of "_check_compatible_with" is incompatible with
    # supertype "DatetimeLikeArrayMixin"; supertype defines the argument type
    # as "Period | Timestamp | Timedelta | NaTType"
    def _check_compatible_with(self, other: Period | NaTType | PeriodArray) -> None:  # type: ignore[override]
        if other is NaT:
            return
        # error: Item "NaTType" of "Period | NaTType | PeriodArray" has no
        # attribute "freq"
        self._require_matching_freq(other.freq)  # type: ignore[union-attr]

    # --------------------------------------------------------------------
    # Data / Attributes

    @cache_readonly
    def dtype(self) -> PeriodDtype:
        return self._dtype

    # error: Cannot override writeable attribute with read-only property
    @property  # type: ignore[override]
    def freq(self) -> BaseOffset:
        """
        Return the frequency object for this PeriodArray.
        """
        return self.dtype.freq

    @property
    def freqstr(self) -> str:
        return freq_to_period_freqstr(self.freq.n, self.freq.name)

    def __array__(
        self, dtype: NpDtype | None = None, copy: bool | None = None
    ) -> np.ndarray:
        if dtype == "i8":
            return self.asi8
        elif dtype == bool:
            return ~self._isnan

        # This will raise TypeError for non-object dtypes
        return np.array(list(self), dtype=object)

    def __arrow_array__(self, type=None):
        """
        Convert myself into a pyarrow Array.
        """
        import pyarrow

        from pandas.core.arrays.arrow.extension_types import ArrowPeriodType

        if type is not None:
            if pyarrow.types.is_integer(type):
                return pyarrow.array(self._ndarray, mask=self.isna(), type=type)
            elif isinstance(type, ArrowPeriodType):
                # ensure we have the same freq
                if self.freqstr != type.freq:
                    raise TypeError(
                        "Not supported to convert PeriodArray to array with different "
                        f"'freq' ({self.freqstr} vs {type.freq})"
                    )
            else:
                raise TypeError(
                    f"Not supported to convert PeriodArray to '{type}' type"
                )

        period_type = ArrowPeriodType(self.freqstr)
        storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64")
        return pyarrow.ExtensionArray.from_storage(period_type, storage_array)

    # --------------------------------------------------------------------
    # Vectorized analogues of Period properties

    year = _field_accessor(
        "year",
        """
        The year of the period.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
        >>> idx.year
        Index([2023, 2024, 2025], dtype='int64')
        """,
    )
    month = _field_accessor(
        "month",
        """
        The month as January=1, December=12.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
        >>> idx.month
        Index([1, 2, 3], dtype='int64')
        """,
    )
    day = _field_accessor(
        "day",
        """
        The days of the period.

        Examples
        --------
        >>> idx = pd.PeriodIndex(['2020-01-31', '2020-02-28'], freq='D')
        >>> idx.day
        Index([31, 28], dtype='int64')
        """,
    )
    hour = _field_accessor(
        "hour",
        """
        The hour of the period.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01-01 10:00", "2023-01-01 11:00"], freq='h')
        >>> idx.hour
        Index([10, 11], dtype='int64')
        """,
    )
    minute = _field_accessor(
        "minute",
        """
        The minute of the period.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01-01 10:30:00",
        ...                       "2023-01-01 11:50:00"], freq='min')
        >>> idx.minute
        Index([30, 50], dtype='int64')
        """,
    )
    second = _field_accessor(
        "second",
        """
        The second of the period.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01-01 10:00:30",
        ...                       "2023-01-01 10:00:31"], freq='s')
        >>> idx.second
        Index([30, 31], dtype='int64')
        """,
    )
    weekofyear = _field_accessor(
        "week",
        """
        The week ordinal of the year.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
        >>> idx.week  # It can be written `weekofyear`
        Index([5, 9, 13], dtype='int64')
        """,
    )
    week = weekofyear
    day_of_week = _field_accessor(
        "day_of_week",
        """
        The day of the week with Monday=0, Sunday=6.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D")
        >>> idx.weekday
        Index([6, 0, 1], dtype='int64')
        """,
    )
    dayofweek = day_of_week
    weekday = dayofweek
    dayofyear = day_of_year = _field_accessor(
        "day_of_year",
        """
        The ordinal day of the year.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01-10", "2023-02-01", "2023-03-01"], freq="D")
        >>> idx.dayofyear
        Index([10, 32, 60], dtype='int64')

        >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
        >>> idx
        PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]')
        >>> idx.dayofyear
        Index([365, 366, 365], dtype='int64')
        """,
    )
    quarter = _field_accessor(
        "quarter",
        """
        The quarter of the date.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
        >>> idx.quarter
        Index([1, 1, 1], dtype='int64')
        """,
    )
    qyear = _field_accessor("qyear")
    days_in_month = _field_accessor(
        "days_in_month",
        """
        The number of days in the month.

        Examples
        --------
        For Series:

        >>> period = pd.period_range('2020-1-1 00:00', '2020-3-1 00:00', freq='M')
        >>> s = pd.Series(period)
        >>> s
        0    2020-01
        1    2020-02
        2    2020-03
        dtype: period[M]
        >>> s.dt.days_in_month
        0    31
        1    29
        2    31
        dtype: int64

        For PeriodIndex:

        >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
        >>> idx.days_in_month  # It can be also entered as `daysinmonth`
        Index([31, 28, 31], dtype='int64')
        """,
    )
    daysinmonth = days_in_month

    @property
    def is_leap_year(self) -> npt.NDArray[np.bool_]:
        """
        Logical indicating if the date belongs to a leap year.

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
        >>> idx.is_leap_year
        array([False, True, False])
        """
        return isleapyear_arr(np.asarray(self.year))

    def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray:
        """
        Cast to DatetimeArray/Index.

        Parameters
        ----------
        freq : str or DateOffset, optional
            Target frequency. The default is 'D' for week or longer,
            's' otherwise.
        how : {'s', 'e', 'start', 'end'}
            Whether to use the start or end of the time period being converted.

        Returns
        -------
        DatetimeArray/Index

        Examples
        --------
        >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")
        >>> idx.to_timestamp()
        DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'],
                      dtype='datetime64[ns]', freq='MS')
        """
        from pandas.core.arrays import DatetimeArray

        how = libperiod.validate_end_alias(how)

        end = how == "E"
        if end:
            if freq == "B" or self.freq == "B":
                # roll forward to ensure we land on B date
                adjust = Timedelta(1, "D") - Timedelta(1, "ns")
                return self.to_timestamp(how="start") + adjust
            else:
                adjust = Timedelta(1, "ns")
                return (self + self.freq).to_timestamp(how="start") - adjust

        if freq is None:
            freq_code = self._dtype._get_to_timestamp_base()
            dtype = PeriodDtypeBase(freq_code, 1)
            freq = dtype._freqstr
            base = freq_code
        else:
            freq = Period._maybe_convert_freq(freq)
            base = freq._period_dtype_code

        new_parr = self.asfreq(freq, how=how)

        new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base)
        dta = DatetimeArray._from_sequence(new_data)

        if self.freq.name == "B":
            # See if we can retain BDay instead of Day in cases where
            # len(self) is too small for infer_freq to distinguish between them
            diffs = libalgos.unique_deltas(self.asi8)
            if len(diffs) == 1:
                diff = diffs[0]
                if diff == self.dtype._n:
                    dta._freq = self.freq
                elif diff == 1:
                    dta._freq = self.freq.base
                # TODO: other cases?
            return dta
        else:
            return dta._with_freq("infer")

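    # The how="E" branch above lands one nanosecond before the next period
    # starts, via (self + self.freq).to_timestamp(how="start") - 1ns. A quick
    # check through the public PeriodIndex API:
    #
    #   >>> import pandas as pd
    #   >>> pidx = pd.PeriodIndex(["2023-01", "2023-02"], freq="M")
    #   >>> pidx.to_timestamp(how="end")[0]
    #   Timestamp('2023-01-31 23:59:59.999999999')
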
    # --------------------------------------------------------------------

    def _box_func(self, x) -> Period | NaTType:
        return Period._from_ordinal(ordinal=x, freq=self.freq)

    @doc(**_shared_doc_kwargs, other="PeriodIndex", other_name="PeriodIndex")
    def asfreq(self, freq=None, how: str = "E") -> Self:
        """
        Convert the {klass} to the specified frequency `freq`.

        Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments
        to each :class:`~pandas.Period` in this {klass}.

        Parameters
        ----------
        freq : str
            A frequency.
        how : str {{'E', 'S'}}, default 'E'
            Whether the elements should be aligned to the end
            or start within each period.

            * 'E', 'END', or 'FINISH' for end,
            * 'S', 'START', or 'BEGIN' for start.

            January 31st ('END') vs. January 1st ('START') for example.

        Returns
        -------
        {klass}
            The transformed {klass} with the new frequency.

        See Also
        --------
        {other}.asfreq: Convert each Period in a {other_name} to the given frequency.
        Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency.

        Examples
        --------
        >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='Y')
        >>> pidx
        PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
        dtype='period[Y-DEC]')

        >>> pidx.asfreq('M')
        PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
        '2015-12'], dtype='period[M]')

        >>> pidx.asfreq('M', how='S')
        PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
        '2015-01'], dtype='period[M]')
        """
        how = libperiod.validate_end_alias(how)
        if isinstance(freq, BaseOffset) and hasattr(freq, "_period_dtype_code"):
            freq = PeriodDtype(freq)._freqstr
        freq = Period._maybe_convert_freq(freq)

        base1 = self._dtype._dtype_code
        base2 = freq._period_dtype_code

        asi8 = self.asi8
        # self.freq.n can't be negative or 0
        end = how == "E"
        if end:
            ordinal = asi8 + self.dtype._n - 1
        else:
            ordinal = asi8

        new_data = period_asfreq_arr(ordinal, base1, base2, end)

        if self._hasna:
            new_data[self._isnan] = iNaT

        dtype = PeriodDtype(freq)
        return type(self)(new_data, dtype=dtype)

    # ------------------------------------------------------------------
    # Rendering Methods

    def _formatter(self, boxed: bool = False):
        if boxed:
            return str
        return "'{}'".format

    def _format_native_types(
        self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
    ) -> npt.NDArray[np.object_]:
        """
        actually format my specific types
        """
        return libperiod.period_array_strftime(
            self.asi8, self.dtype._dtype_code, na_rep, date_format
        )

    # ------------------------------------------------------------------

    def astype(self, dtype, copy: bool = True):
        # We handle Period[T] -> Period[U]
        # Our parent handles everything else.
        dtype = pandas_dtype(dtype)
        if dtype == self._dtype:
            if not copy:
                return self
            else:
                return self.copy()
        if isinstance(dtype, PeriodDtype):
            return self.asfreq(dtype.freq)

        if lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype):
            # GH#45038 match PeriodIndex behavior.
            tz = getattr(dtype, "tz", None)
            unit = dtl.dtype_to_unit(dtype)
            return self.to_timestamp().tz_localize(tz).as_unit(unit)

        return super().astype(dtype, copy=copy)

    def searchsorted(
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter | None = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        npvalue = self._validate_setitem_value(value).view("M8[ns]")

        # Cast to M8 to get datetime-like NaT placement,
        # similar to dtl._period_dispatch
        m8arr = self._ndarray.view("M8[ns]")
        return m8arr.searchsorted(npvalue, side=side, sorter=sorter)

    def _pad_or_backfill(
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        # view as dt64 so we get treated as timelike in core.missing,
        # similar to dtl._period_dispatch
        dta = self.view("M8[ns]")
        result = dta._pad_or_backfill(
            method=method, limit=limit, limit_area=limit_area, copy=copy
        )
        if copy:
            return cast("Self", result.view(self.dtype))
        else:
            return self

    def fillna(
        self, value=None, method=None, limit: int | None = None, copy: bool = True
    ) -> Self:
        if method is not None:
            # view as dt64 so we get treated as timelike in core.missing,
            # similar to dtl._period_dispatch
            dta = self.view("M8[ns]")
            result = dta.fillna(value=value, method=method, limit=limit, copy=copy)
            # error: Incompatible return value type (got "Union[ExtensionArray,
            # ndarray[Any, Any]]", expected "PeriodArray")
            return result.view(self.dtype)  # type: ignore[return-value]
        return super().fillna(value=value, method=method, limit=limit, copy=copy)

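    # In astype above, Period[T] -> Period[U] is routed to asfreq rather than
    # a generic cast, so ordinals are re-anchored instead of reinterpreted.
    # A sketch; treat the exact repr as a pandas-2.x assumption:
    #
    #   >>> import pandas as pd
    #   >>> pd.period_range("2023-01", periods=3, freq="M").astype("period[Q]")
    #   PeriodIndex(['2023Q1', '2023Q1', '2023Q1'], dtype='period[Q-DEC]')
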
    # ------------------------------------------------------------------
    # Arithmetic Methods

    def _addsub_int_array_or_scalar(
        self, other: np.ndarray | int, op: Callable[[Any, Any], Any]
    ) -> Self:
        """
        Add or subtract array of integers.

        Parameters
        ----------
        other : np.ndarray[int64] or int
        op : {operator.add, operator.sub}

        Returns
        -------
        result : PeriodArray
        """
        assert op in [operator.add, operator.sub]
        if op is operator.sub:
            other = -other
        res_values = add_overflowsafe(self.asi8, np.asarray(other, dtype="i8"))
        return type(self)(res_values, dtype=self.dtype)

    def _add_offset(self, other: BaseOffset):
        assert not isinstance(other, Tick)

        self._require_matching_freq(other, base=True)
        return self._addsub_int_array_or_scalar(other.n, operator.add)

    # TODO: can we de-duplicate with Period._add_timedeltalike_scalar?
    def _add_timedeltalike_scalar(self, other):
        """
        Parameters
        ----------
        other : timedelta, Tick, np.timedelta64

        Returns
        -------
        PeriodArray
        """
        if not isinstance(self.freq, Tick):
            # We cannot add timedelta-like to non-tick PeriodArray
            raise raise_on_incompatible(self, other)

        if isna(other):
            # i.e. np.timedelta64("NaT")
            return super()._add_timedeltalike_scalar(other)

        td = np.asarray(Timedelta(other).asm8)
        return self._add_timedelta_arraylike(td)

    def _add_timedelta_arraylike(
        self, other: TimedeltaArray | npt.NDArray[np.timedelta64]
    ) -> Self:
        """
        Parameters
        ----------
        other : TimedeltaArray or ndarray[timedelta64]

        Returns
        -------
        PeriodArray
        """
        if not self.dtype._is_tick_like():
            # We cannot add timedelta-like to non-tick PeriodArray
            raise TypeError(
                f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
            )

        dtype = np.dtype(f"m8[{self.dtype._td64_unit}]")

        # Similar to _check_timedeltalike_freq_compat, but we raise with a
        # more specific exception message if necessary.
        try:
            delta = astype_overflowsafe(
                np.asarray(other), dtype=dtype, copy=False, round_ok=False
            )
        except ValueError as err:
            # e.g. if we have minutes freq and try to add 30s
            # "Cannot losslessly convert units"
            raise IncompatibleFrequency(
                "Cannot add/subtract timedelta-like from PeriodArray that is "
                "not an integer multiple of the PeriodArray's freq."
            ) from err

        res_values = add_overflowsafe(self.asi8, np.asarray(delta.view("i8")))
        return type(self)(res_values, dtype=self.dtype)

    def _check_timedeltalike_freq_compat(self, other):
        """
        Arithmetic operations with timedelta-like scalars or array `other`
        are only valid if `other` is an integer multiple of `self.freq`.
        If the operation is valid, find that integer multiple. Otherwise,
        raise because the operation is invalid.

        Parameters
        ----------
        other : timedelta, np.timedelta64, Tick,
                ndarray[timedelta64], TimedeltaArray, TimedeltaIndex

        Returns
        -------
        multiple : int or ndarray[int64]

        Raises
        ------
        IncompatibleFrequency
        """
        assert self.dtype._is_tick_like()  # checked by calling function

        dtype = np.dtype(f"m8[{self.dtype._td64_unit}]")

        if isinstance(other, (timedelta, np.timedelta64, Tick)):
            td = np.asarray(Timedelta(other).asm8)
        else:
            td = np.asarray(other)

        try:
            delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False)
        except ValueError as err:
            raise raise_on_incompatible(self, other) from err

        delta = delta.view("i8")
        return lib.item_from_zerodim(delta)


def raise_on_incompatible(left, right) -> IncompatibleFrequency:
    """
    Helper function to render a consistent error message when raising
    IncompatibleFrequency.

    Parameters
    ----------
    left : PeriodArray
    right : None, DateOffset, Period, ndarray, or timedelta-like

    Returns
    -------
    IncompatibleFrequency
        Exception to be raised by the caller.
    """
    # GH#24283 error message format depends on whether right is scalar
    if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
        other_freq = None
    elif isinstance(right, BaseOffset):
        other_freq = freq_to_period_freqstr(right.n, right.name)
    elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period)):
        other_freq = right.freqstr
    else:
        other_freq = delta_to_tick(Timedelta(right)).freqstr

    own_freq = freq_to_period_freqstr(left.freq.n, left.freq.name)
    msg = DIFFERENT_FREQ.format(
        cls=type(left).__name__, own_freq=own_freq, other_freq=other_freq
    )
    return IncompatibleFrequency(msg)

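# Integer addition on a PeriodArray shifts ordinals by n periods, while an
# offset whose base does not match the freq raises the IncompatibleFrequency
# built by raise_on_incompatible above. A sketch; the pandas.errors re-export
# of IncompatibleFrequency is an assumption about recent pandas versions:
#
#   >>> import pandas as pd
#   >>> per = pd.period_range("2023-01", periods=2, freq="M").array
#   >>> (per + 1)[0]
#   Period('2023-02', 'M')
#   >>> per + pd.offsets.Day(1)  # doctest: +IGNORE_EXCEPTION_DETAIL
#   Traceback (most recent call last):
#   IncompatibleFrequency: Input has different freq=D from PeriodArray(freq=M)
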
# -------------------------------------------------------------------
# Constructor Helpers


def period_array(
    data: Sequence[Period | str | None] | AnyArrayLike,
    freq: str | Tick | BaseOffset | None = None,
    copy: bool = False,
) -> PeriodArray:
    """
    Construct a new PeriodArray from a sequence of Period scalars.

    Parameters
    ----------
    data : Sequence of Period objects
        A sequence of Period objects. These are required to all have
        the same ``freq``. Missing values can be indicated by ``None``
        or ``pandas.NaT``.
    freq : str, Tick, or Offset
        The frequency of every element of the array. This can be specified
        to avoid inferring the `freq` from `data`.
    copy : bool, default False
        Whether to ensure a copy of the data is made.

    Returns
    -------
    PeriodArray

    See Also
    --------
    PeriodArray
    pandas.PeriodIndex

    Examples
    --------
    >>> period_array([pd.Period('2017', freq='Y'),
    ...               pd.Period('2018', freq='Y')])
    <PeriodArray>
    ['2017', '2018']
    Length: 2, dtype: period[Y-DEC]

    >>> period_array([pd.Period('2017', freq='Y'),
    ...               pd.Period('2018', freq='Y'),
    ...               pd.NaT])
    <PeriodArray>
    ['2017', '2018', 'NaT']
    Length: 3, dtype: period[Y-DEC]

    Integers that look like years are handled

    >>> period_array([2000, 2001, 2002], freq='D')
    <PeriodArray>
    ['2000-01-01', '2001-01-01', '2002-01-01']
    Length: 3, dtype: period[D]

    Datetime-like strings may also be passed

    >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
    <PeriodArray>
    ['2000Q1', '2000Q2', '2000Q3', '2000Q4']
    Length: 4, dtype: period[Q-DEC]
    """
    data_dtype = getattr(data, "dtype", None)

    if lib.is_np_dtype(data_dtype, "M"):
        return PeriodArray._from_datetime64(data, freq)
    if isinstance(data_dtype, PeriodDtype):
        out = PeriodArray(data)
        if freq is not None:
            if freq == data_dtype.freq:
                return out
            return out.asfreq(freq)
        return out

    # other iterable of some kind
    if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
        data = list(data)

    arrdata = np.asarray(data)

    dtype: PeriodDtype | None
    if freq:
        dtype = PeriodDtype(freq)
    else:
        dtype = None

    if arrdata.dtype.kind == "f" and len(arrdata) > 0:
        raise TypeError("PeriodIndex does not allow floating point in construction")

    if arrdata.dtype.kind in "iu":
        arr = arrdata.astype(np.int64, copy=False)
        # error: Argument 2 to "from_ordinals" has incompatible type "Union[str,
        # Tick, None]"; expected "Union[timedelta, BaseOffset, str]"
        ordinals = libperiod.from_ordinals(arr, freq)  # type: ignore[arg-type]
        return PeriodArray(ordinals, dtype=dtype)

    data = ensure_object(arrdata)
    if freq is None:
        freq = libperiod.extract_freq(data)
    dtype = PeriodDtype(freq)
    return PeriodArray._from_sequence(data, dtype=dtype)

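# A quick check of period_array above, mirroring its docstring: Period
# scalars with a shared freq, with NaT allowed for missing entries (the
# internal import path is an implementation detail, not public API):
#
#   >>> import pandas as pd
#   >>> from pandas.core.arrays.period import period_array
#   >>> arr = period_array([pd.Period("2017", freq="Y"), pd.NaT])
#   >>> arr.dtype
#   period[Y-DEC]
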
@overload
def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT:
    ...


@overload
def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset:
    ...


def validate_dtype_freq(
    dtype, freq: BaseOffsetT | BaseOffset | timedelta | str | None
) -> BaseOffsetT:
    """
    If both a dtype and a freq are available, ensure they match. If only
    dtype is available, extract the implied freq.

    Parameters
    ----------
    dtype : dtype
    freq : DateOffset or None

    Returns
    -------
    freq : DateOffset

    Raises
    ------
    ValueError : non-period dtype
    IncompatibleFrequency : mismatch between dtype and freq
    """
    if freq is not None:
        freq = to_offset(freq, is_period=True)

    if dtype is not None:
        dtype = pandas_dtype(dtype)
        if not isinstance(dtype, PeriodDtype):
            raise ValueError("dtype must be PeriodDtype")
        if freq is None:
            freq = dtype.freq
        elif freq != dtype.freq:
            raise IncompatibleFrequency("specified freq and dtype are different")
    # error: Incompatible return value type (got "Union[BaseOffset, Any, None]",
    # expected "BaseOffset")
    return freq  # type: ignore[return-value]


def dt64arr_to_periodarr(
    data, freq, tz=None
) -> tuple[npt.NDArray[np.int64], BaseOffset]:
    """
    Convert a datetime-like array to Period ordinals.

    Parameters
    ----------
    data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
    freq : Optional[Union[str, Tick]]
        Must match the `freq` on the `data` if `data` is a DatetimeIndex
        or Series.
    tz : Optional[tzinfo]

    Returns
    -------
    ordinals : ndarray[int64]
    freq : Tick
        The frequency extracted from the Series or DatetimeIndex if that's
        used.

    """
    if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M":
        raise ValueError(f"Wrong dtype: {data.dtype}")

    if freq is None:
        if isinstance(data, ABCIndex):
            data, freq = data._values, data.freq
        elif isinstance(data, ABCSeries):
            data, freq = data._values, data.dt.freq

    elif isinstance(data, (ABCIndex, ABCSeries)):
        data = data._values

    reso = get_unit_from_dtype(data.dtype)
    freq = Period._maybe_convert_freq(freq)
    base = freq._period_dtype_code
    return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq


def _get_ordinal_range(start, end, periods, freq, mult: int = 1):
    if com.count_not_none(start, end, periods) != 2:
        raise ValueError(
            "Of the three parameters: start, end, and periods, "
            "exactly two must be specified"
        )

    if freq is not None:
        freq = to_offset(freq, is_period=True)
        mult = freq.n

    if start is not None:
        start = Period(start, freq)
    if end is not None:
        end = Period(end, freq)

    is_start_per = isinstance(start, Period)
    is_end_per = isinstance(end, Period)

    if is_start_per and is_end_per and start.freq != end.freq:
        raise ValueError("start and end must have same freq")
    if start is NaT or end is NaT:
        raise ValueError("start and end must not be NaT")

    if freq is None:
        if is_start_per:
            freq = start.freq
        elif is_end_per:
            freq = end.freq
        else:  # pragma: no cover
            raise ValueError("Could not infer freq from start/end")
        mult = freq.n

    if periods is not None:
        periods = periods * mult
        if start is None:
            data = np.arange(
                end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
            )
        else:
            data = np.arange(
                start.ordinal, start.ordinal + periods, mult, dtype=np.int64
            )
    else:
        data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)

    return data, freq

|
1247 |
+
def _range_from_fields(
|
1248 |
+
year=None,
|
1249 |
+
month=None,
|
1250 |
+
quarter=None,
|
1251 |
+
day=None,
|
1252 |
+
hour=None,
|
1253 |
+
minute=None,
|
1254 |
+
second=None,
|
1255 |
+
freq=None,
|
1256 |
+
) -> tuple[np.ndarray, BaseOffset]:
|
1257 |
+
if hour is None:
|
1258 |
+
hour = 0
|
1259 |
+
if minute is None:
|
1260 |
+
minute = 0
|
1261 |
+
if second is None:
|
1262 |
+
second = 0
|
1263 |
+
if day is None:
|
1264 |
+
day = 1
|
1265 |
+
|
1266 |
+
ordinals = []
|
1267 |
+
|
1268 |
+
if quarter is not None:
|
1269 |
+
if freq is None:
|
1270 |
+
freq = to_offset("Q", is_period=True)
|
1271 |
+
base = FreqGroup.FR_QTR.value
|
1272 |
+
else:
|
1273 |
+
freq = to_offset(freq, is_period=True)
|
1274 |
+
base = libperiod.freq_to_dtype_code(freq)
|
1275 |
+
if base != FreqGroup.FR_QTR.value:
|
1276 |
+
raise AssertionError("base must equal FR_QTR")
|
1277 |
+
|
1278 |
+
freqstr = freq.freqstr
|
1279 |
+
year, quarter = _make_field_arrays(year, quarter)
|
1280 |
+
for y, q in zip(year, quarter):
|
1281 |
+
calendar_year, calendar_month = parsing.quarter_to_myear(y, q, freqstr)
|
1282 |
+
val = libperiod.period_ordinal(
|
1283 |
+
calendar_year, calendar_month, 1, 1, 1, 1, 0, 0, base
|
1284 |
+
)
|
1285 |
+
ordinals.append(val)
|
1286 |
+
else:
|
1287 |
+
freq = to_offset(freq, is_period=True)
|
1288 |
+
base = libperiod.freq_to_dtype_code(freq)
|
1289 |
+
arrays = _make_field_arrays(year, month, day, hour, minute, second)
|
1290 |
+
for y, mth, d, h, mn, s in zip(*arrays):
|
1291 |
+
ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
|
1292 |
+
|
1293 |
+
return np.array(ordinals, dtype=np.int64), freq
|
1294 |
+
|
1295 |
+
|
1296 |
+
def _make_field_arrays(*fields) -> list[np.ndarray]:
|
1297 |
+
length = None
|
1298 |
+
for x in fields:
|
1299 |
+
if isinstance(x, (list, np.ndarray, ABCSeries)):
|
1300 |
+
if length is not None and len(x) != length:
|
1301 |
+
raise ValueError("Mismatched Period array lengths")
|
1302 |
+
if length is None:
|
1303 |
+
length = len(x)
|
1304 |
+
|
1305 |
+
# error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected
|
1306 |
+
# "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int,
|
1307 |
+
# integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]"
|
1308 |
+
return [
|
1309 |
+
np.asarray(x)
|
1310 |
+
if isinstance(x, (np.ndarray, list, ABCSeries))
|
1311 |
+
else np.repeat(x, length) # type: ignore[arg-type]
|
1312 |
+
for x in fields
|
1313 |
+
]
|
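A minimal usage sketch of how these private helpers surface through the public API (assuming a standard pandas 2.x install): pd.period_range resolves start/end/periods through _get_ordinal_range, and DatetimeIndex.to_period goes through dt64arr_to_periodarr.

import pandas as pd

# Exactly two of start/end/periods must be supplied, mirroring the
# com.count_not_none(start, end, periods) != 2 check above.
idx = pd.period_range(start="2023-01", periods=3, freq="M")
print(idx)  # PeriodIndex(['2023-01', '2023-02', '2023-03'], dtype='period[M]')

# dt64arr_to_periodarr converts datetime64 values to Period ordinals,
# which is what DatetimeIndex.to_period relies on.
dti = pd.date_range("2023-01-01", periods=3, freq="D")
print(dti.to_period("M"))  # all three days map into period 2023-01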
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/string_.py
ADDED
@@ -0,0 +1,657 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    ClassVar,
    Literal,
)

import numpy as np

from pandas._config import get_option

from pandas._libs import (
    lib,
    missing as libmissing,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.lib import ensure_string_array
from pandas.compat import pa_version_under10p1
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc

from pandas.core.dtypes.base import (
    ExtensionDtype,
    StorageExtensionDtype,
    register_extension_dtype,
)
from pandas.core.dtypes.common import (
    is_array_like,
    is_bool_dtype,
    is_integer_dtype,
    is_object_dtype,
    is_string_dtype,
    pandas_dtype,
)

from pandas.core import ops
from pandas.core.array_algos import masked_reductions
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.floating import (
    FloatingArray,
    FloatingDtype,
)
from pandas.core.arrays.integer import (
    IntegerArray,
    IntegerDtype,
)
from pandas.core.arrays.numpy_ import NumpyExtensionArray
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.missing import isna

if TYPE_CHECKING:
    import pyarrow

    from pandas._typing import (
        AxisInt,
        Dtype,
        DtypeObj,
        NumpySorter,
        NumpyValueArrayLike,
        Scalar,
        Self,
        npt,
        type_t,
    )

    from pandas import Series


@register_extension_dtype
class StringDtype(StorageExtensionDtype):
    """
    Extension dtype for string data.

    .. warning::

       StringDtype is considered experimental. The implementation and
       parts of the API may change without warning.

    Parameters
    ----------
    storage : {"python", "pyarrow", "pyarrow_numpy"}, optional
        If not given, the value of ``pd.options.mode.string_storage``.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> pd.StringDtype()
    string[python]

    >>> pd.StringDtype(storage="pyarrow")
    string[pyarrow]
    """

    # error: Cannot override instance variable (previously declared on
    # base class "StorageExtensionDtype") with class variable
    name: ClassVar[str] = "string"  # type: ignore[misc]

    #: StringDtype().na_value uses pandas.NA except for the implementation that
    # follows NumPy semantics, which uses nan.
    @property
    def na_value(self) -> libmissing.NAType | float:  # type: ignore[override]
        if self.storage == "pyarrow_numpy":
            return np.nan
        else:
            return libmissing.NA

    _metadata = ("storage",)

    def __init__(self, storage=None) -> None:
        if storage is None:
            infer_string = get_option("future.infer_string")
            if infer_string:
                storage = "pyarrow_numpy"
            else:
                storage = get_option("mode.string_storage")
        if storage not in {"python", "pyarrow", "pyarrow_numpy"}:
            raise ValueError(
                f"Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'. "
                f"Got {storage} instead."
            )
        if storage in ("pyarrow", "pyarrow_numpy") and pa_version_under10p1:
            raise ImportError(
                "pyarrow>=10.0.1 is required for PyArrow backed StringArray."
            )
        self.storage = storage

    @property
    def type(self) -> type[str]:
        return str

    @classmethod
    def construct_from_string(cls, string) -> Self:
        """
        Construct a StringDtype from a string.

        Parameters
        ----------
        string : str
            The type of the name. The storage type will be taken from `string`.
            Valid options and their storage types are

            ========================== ==============================================
            string                     result storage
            ========================== ==============================================
            ``'string'``               pd.options.mode.string_storage, default python
            ``'string[python]'``       python
            ``'string[pyarrow]'``      pyarrow
            ========================== ==============================================

        Returns
        -------
        StringDtype

        Raises
        ------
        TypeError
            If the string is not a valid option.
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        if string == "string":
            return cls()
        elif string == "string[python]":
            return cls(storage="python")
        elif string == "string[pyarrow]":
            return cls(storage="pyarrow")
        elif string == "string[pyarrow_numpy]":
            return cls(storage="pyarrow_numpy")
        else:
            raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")

    # https://github.com/pandas-dev/pandas/issues/36126
    # error: Signature of "construct_array_type" incompatible with supertype
    # "ExtensionDtype"
    def construct_array_type(  # type: ignore[override]
        self,
    ) -> type_t[BaseStringArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        from pandas.core.arrays.string_arrow import (
            ArrowStringArray,
            ArrowStringArrayNumpySemantics,
        )

        if self.storage == "python":
            return StringArray
        elif self.storage == "pyarrow":
            return ArrowStringArray
        else:
            return ArrowStringArrayNumpySemantics

    def __from_arrow__(
        self, array: pyarrow.Array | pyarrow.ChunkedArray
    ) -> BaseStringArray:
        """
        Construct StringArray from pyarrow Array/ChunkedArray.
        """
        if self.storage == "pyarrow":
            from pandas.core.arrays.string_arrow import ArrowStringArray

            return ArrowStringArray(array)
        elif self.storage == "pyarrow_numpy":
            from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics

            return ArrowStringArrayNumpySemantics(array)
        else:
            import pyarrow

            if isinstance(array, pyarrow.Array):
                chunks = [array]
            else:
                # pyarrow.ChunkedArray
                chunks = array.chunks

            results = []
            for arr in chunks:
                # convert chunk by chunk to numpy and concatenate them, to avoid
                # overflow for large string data when concatenating the pyarrow arrays
                arr = arr.to_numpy(zero_copy_only=False)
                arr = ensure_string_array(arr, na_value=libmissing.NA)
                results.append(arr)

        if len(chunks) == 0:
            arr = np.array([], dtype=object)
        else:
            arr = np.concatenate(results)

        # Bypass validation inside StringArray constructor, see GH#47781
        new_string_array = StringArray.__new__(StringArray)
        NDArrayBacked.__init__(
            new_string_array,
            arr,
            StringDtype(storage="python"),
        )
        return new_string_array


class BaseStringArray(ExtensionArray):
    """
    Mixin class for StringArray, ArrowStringArray.
    """

    @doc(ExtensionArray.tolist)
    def tolist(self):
        if self.ndim > 1:
            return [x.tolist() for x in self]
        return list(self.to_numpy())

    @classmethod
    def _from_scalars(cls, scalars, dtype: DtypeObj) -> Self:
        if lib.infer_dtype(scalars, skipna=True) not in ["string", "empty"]:
            # TODO: require any NAs be valid-for-string
            raise ValueError
        return cls._from_sequence(scalars, dtype=dtype)


# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
# incompatible with definition in base class "ExtensionArray"
class StringArray(BaseStringArray, NumpyExtensionArray):  # type: ignore[misc]
    """
    Extension array for string data.

    .. warning::

       StringArray is considered experimental. The implementation and
       parts of the API may change without warning.

    Parameters
    ----------
    values : array-like
        The array of data.

        .. warning::

           Currently, this expects an object-dtype ndarray
           where the elements are Python strings
           or nan-likes (``None``, ``np.nan``, ``NA``).
           This may change without warning in the future. Use
           :meth:`pandas.array` with ``dtype="string"`` for a stable way of
           creating a `StringArray` from any sequence.

        .. versionchanged:: 1.5.0

           StringArray now accepts array-likes containing
           nan-likes (``None``, ``np.nan``) for the ``values`` parameter
           in addition to strings and :attr:`pandas.NA`

    copy : bool, default False
        Whether to copy the array of data.

    Attributes
    ----------
    None

    Methods
    -------
    None

    See Also
    --------
    :func:`pandas.array`
        The recommended function for creating a StringArray.
    Series.str
        The string methods are available on Series backed by
        a StringArray.

    Notes
    -----
    StringArray returns a BooleanArray for comparison methods.

    Examples
    --------
    >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string")
    <StringArray>
    ['This is', 'some text', <NA>, 'data.']
    Length: 4, dtype: string

    Unlike arrays instantiated with ``dtype="object"``, ``StringArray``
    will convert the values to strings.

    >>> pd.array(['1', 1], dtype="object")
    <NumpyExtensionArray>
    ['1', 1]
    Length: 2, dtype: object
    >>> pd.array(['1', 1], dtype="string")
    <StringArray>
    ['1', '1']
    Length: 2, dtype: string

    However, instantiating StringArrays directly with non-strings will raise an error.

    For comparison methods, `StringArray` returns a :class:`pandas.BooleanArray`:

    >>> pd.array(["a", None, "c"], dtype="string") == "a"
    <BooleanArray>
    [True, <NA>, False]
    Length: 3, dtype: boolean
    """

    # undo the NumpyExtensionArray hack
    _typ = "extension"

    def __init__(self, values, copy: bool = False) -> None:
        values = extract_array(values)

        super().__init__(values, copy=copy)
        if not isinstance(values, type(self)):
            self._validate()
        NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python"))

    def _validate(self):
        """Validate that we only store NA or strings."""
        if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
            raise ValueError("StringArray requires a sequence of strings or pandas.NA")
        if self._ndarray.dtype != "object":
            raise ValueError(
                "StringArray requires a sequence of strings or pandas.NA. Got "
                f"'{self._ndarray.dtype}' dtype instead."
            )
        # Check to see if we need to convert NA values to pd.NA
        if self._ndarray.ndim > 2:
            # Ravel if ndims > 2 b/c no cythonized version available
            lib.convert_nans_to_NA(self._ndarray.ravel("K"))
        else:
            lib.convert_nans_to_NA(self._ndarray)

    @classmethod
    def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
        if dtype and not (isinstance(dtype, str) and dtype == "string"):
            dtype = pandas_dtype(dtype)
            assert isinstance(dtype, StringDtype) and dtype.storage == "python"

        from pandas.core.arrays.masked import BaseMaskedArray

        if isinstance(scalars, BaseMaskedArray):
            # avoid costly conversion to object dtype
            na_values = scalars._mask
            result = scalars._data
            result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
            result[na_values] = libmissing.NA

        else:
            if lib.is_pyarrow_array(scalars):
                # pyarrow array; we cannot rely on the "to_numpy" check in
                # ensure_string_array because calling scalars.to_numpy would set
                # zero_copy_only to True which caused problems see GH#52076
                scalars = np.array(scalars)
            # convert non-na-likes to str, and nan-likes to StringDtype().na_value
            result = lib.ensure_string_array(scalars, na_value=libmissing.NA, copy=copy)

        # Manually creating new array avoids the validation step in the __init__, so is
        # faster. Refactor need for validation?
        new_string_array = cls.__new__(cls)
        NDArrayBacked.__init__(new_string_array, result, StringDtype(storage="python"))

        return new_string_array

    @classmethod
    def _from_sequence_of_strings(
        cls, strings, *, dtype: Dtype | None = None, copy: bool = False
    ):
        return cls._from_sequence(strings, dtype=dtype, copy=copy)

    @classmethod
    def _empty(cls, shape, dtype) -> StringArray:
        values = np.empty(shape, dtype=object)
        values[:] = libmissing.NA
        return cls(values).astype(dtype, copy=False)

    def __arrow_array__(self, type=None):
        """
        Convert myself into a pyarrow Array.
        """
        import pyarrow as pa

        if type is None:
            type = pa.string()

        values = self._ndarray.copy()
        values[self.isna()] = None
        return pa.array(values, type=type, from_pandas=True)

    def _values_for_factorize(self):
        arr = self._ndarray.copy()
        mask = self.isna()
        arr[mask] = None
        return arr, None

    def __setitem__(self, key, value) -> None:
        value = extract_array(value, extract_numpy=True)
        if isinstance(value, type(self)):
            # extract_array doesn't extract NumpyExtensionArray subclasses
            value = value._ndarray

        key = check_array_indexer(self, key)
        scalar_key = lib.is_scalar(key)
        scalar_value = lib.is_scalar(value)
        if scalar_key and not scalar_value:
            raise ValueError("setting an array element with a sequence.")

        # validate new items
        if scalar_value:
            if isna(value):
                value = libmissing.NA
            elif not isinstance(value, str):
                raise TypeError(
                    f"Cannot set non-string value '{value}' into a StringArray."
                )
        else:
            if not is_array_like(value):
                value = np.asarray(value, dtype=object)
            if len(value) and not lib.is_string_array(value, skipna=True):
                raise TypeError("Must provide strings.")

            mask = isna(value)
            if mask.any():
                value = value.copy()
                value[isna(value)] = libmissing.NA

        super().__setitem__(key, value)

    def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
        # the super() method NDArrayBackedExtensionArray._putmask uses
        # np.putmask which doesn't properly handle None/pd.NA, so using the
        # base class implementation that uses __setitem__
        ExtensionArray._putmask(self, mask, value)

    def astype(self, dtype, copy: bool = True):
        dtype = pandas_dtype(dtype)

        if dtype == self.dtype:
            if copy:
                return self.copy()
            return self

        elif isinstance(dtype, IntegerDtype):
            arr = self._ndarray.copy()
            mask = self.isna()
            arr[mask] = 0
            values = arr.astype(dtype.numpy_dtype)
            return IntegerArray(values, mask, copy=False)
        elif isinstance(dtype, FloatingDtype):
            arr = self.copy()
            mask = self.isna()
            arr[mask] = "0"
            values = arr.astype(dtype.numpy_dtype)
            return FloatingArray(values, mask, copy=False)
        elif isinstance(dtype, ExtensionDtype):
            # Skip the NumpyExtensionArray.astype method
            return ExtensionArray.astype(self, dtype, copy)
        elif np.issubdtype(dtype, np.floating):
            arr = self._ndarray.copy()
            mask = self.isna()
            arr[mask] = 0
            values = arr.astype(dtype)
            values[mask] = np.nan
            return values

        return super().astype(dtype, copy)

    def _reduce(
        self, name: str, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs
    ):
        if name in ["min", "max"]:
            return getattr(self, name)(skipna=skipna, axis=axis)

        raise TypeError(f"Cannot perform reduction '{name}' with string dtype")

    def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
        nv.validate_min((), kwargs)
        result = masked_reductions.min(
            values=self.to_numpy(), mask=self.isna(), skipna=skipna
        )
        return self._wrap_reduction_result(axis, result)

    def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
        nv.validate_max((), kwargs)
        result = masked_reductions.max(
            values=self.to_numpy(), mask=self.isna(), skipna=skipna
        )
        return self._wrap_reduction_result(axis, result)

    def value_counts(self, dropna: bool = True) -> Series:
        from pandas.core.algorithms import value_counts_internal as value_counts

        result = value_counts(self._ndarray, dropna=dropna).astype("Int64")
        result.index = result.index.astype(self.dtype)
        return result

    def memory_usage(self, deep: bool = False) -> int:
        result = self._ndarray.nbytes
        if deep:
            return result + lib.memory_usage_of_objects(self._ndarray)
        return result

    @doc(ExtensionArray.searchsorted)
    def searchsorted(
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter | None = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        if self._hasna:
            raise ValueError(
                "searchsorted requires array to be sorted, which is impossible "
                "with NAs present."
            )
        return super().searchsorted(value=value, side=side, sorter=sorter)

    def _cmp_method(self, other, op):
        from pandas.arrays import BooleanArray

        if isinstance(other, StringArray):
            other = other._ndarray

        mask = isna(self) | isna(other)
        valid = ~mask

        if not lib.is_scalar(other):
            if len(other) != len(self):
                # prevent improper broadcasting when other is 2D
                raise ValueError(
                    f"Lengths of operands do not match: {len(self)} != {len(other)}"
                )

            other = np.asarray(other)
            other = other[valid]

        if op.__name__ in ops.ARITHMETIC_BINOPS:
            result = np.empty_like(self._ndarray, dtype="object")
            result[mask] = libmissing.NA
            result[valid] = op(self._ndarray[valid], other)
            return StringArray(result)
        else:
            # logical
            result = np.zeros(len(self._ndarray), dtype="bool")
            result[valid] = op(self._ndarray[valid], other)
            return BooleanArray(result, mask)

    _arith_method = _cmp_method

    # ------------------------------------------------------------------------
    # String methods interface
    # error: Incompatible types in assignment (expression has type "NAType",
    # base class "NumpyExtensionArray" defined the type as "float")
    _str_na_value = libmissing.NA  # type: ignore[assignment]

    def _str_map(
        self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
    ):
        from pandas.arrays import BooleanArray

        if dtype is None:
            dtype = StringDtype(storage="python")
        if na_value is None:
            na_value = self.dtype.na_value

        mask = isna(self)
        arr = np.asarray(self)

        if is_integer_dtype(dtype) or is_bool_dtype(dtype):
            constructor: type[IntegerArray | BooleanArray]
            if is_integer_dtype(dtype):
                constructor = IntegerArray
            else:
                constructor = BooleanArray

            na_value_is_na = isna(na_value)
            if na_value_is_na:
                na_value = 1
            elif dtype == np.dtype("bool"):
                na_value = bool(na_value)
            result = lib.map_infer_mask(
                arr,
                f,
                mask.view("uint8"),
                convert=False,
                na_value=na_value,
                # error: Argument 1 to "dtype" has incompatible type
                # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
                # "Type[object]"
                dtype=np.dtype(dtype),  # type: ignore[arg-type]
            )

            if not na_value_is_na:
                mask[:] = False

            return constructor(result, mask)

        elif is_string_dtype(dtype) and not is_object_dtype(dtype):
            # i.e. StringDtype
            result = lib.map_infer_mask(
                arr, f, mask.view("uint8"), convert=False, na_value=na_value
            )
            return StringArray(result)
        else:
            # This is when the result type is object. We reach this when
            # -> We know the result type is truly object (e.g. .encode returns bytes
            #    or .findall returns a list).
            # -> We don't know the result type. E.g. `.get` can return anything.
            return lib.map_infer_mask(arr, f, mask.view("uint8"))
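A short sketch of the python-storage StringArray in use through the public constructors, under the same pandas 2.x assumption:

import pandas as pd

arr = pd.array(["a", None, "b"], dtype="string")  # StringArray (python storage)
print(arr == "a")  # BooleanArray, per the Notes above: [True, <NA>, False]

# StringArray.astype routes numeric targets through IntegerArray/FloatingArray,
# zero-filling masked slots instead of failing on pd.NA.
nums = pd.array(["1", "2", None], dtype="string")
print(nums.astype("Int64"))  # [1, 2, <NA>] with dtype Int64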
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/string_arrow.py
ADDED
@@ -0,0 +1,715 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from functools import partial
|
4 |
+
import operator
|
5 |
+
import re
|
6 |
+
from typing import (
|
7 |
+
TYPE_CHECKING,
|
8 |
+
Callable,
|
9 |
+
Union,
|
10 |
+
)
|
11 |
+
import warnings
|
12 |
+
|
13 |
+
import numpy as np
|
14 |
+
|
15 |
+
from pandas._libs import (
|
16 |
+
lib,
|
17 |
+
missing as libmissing,
|
18 |
+
)
|
19 |
+
from pandas.compat import (
|
20 |
+
pa_version_under10p1,
|
21 |
+
pa_version_under13p0,
|
22 |
+
)
|
23 |
+
from pandas.util._exceptions import find_stack_level
|
24 |
+
|
25 |
+
from pandas.core.dtypes.common import (
|
26 |
+
is_bool_dtype,
|
27 |
+
is_integer_dtype,
|
28 |
+
is_object_dtype,
|
29 |
+
is_scalar,
|
30 |
+
is_string_dtype,
|
31 |
+
pandas_dtype,
|
32 |
+
)
|
33 |
+
from pandas.core.dtypes.missing import isna
|
34 |
+
|
35 |
+
from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin
|
36 |
+
from pandas.core.arrays.arrow import ArrowExtensionArray
|
37 |
+
from pandas.core.arrays.boolean import BooleanDtype
|
38 |
+
from pandas.core.arrays.integer import Int64Dtype
|
39 |
+
from pandas.core.arrays.numeric import NumericDtype
|
40 |
+
from pandas.core.arrays.string_ import (
|
41 |
+
BaseStringArray,
|
42 |
+
StringDtype,
|
43 |
+
)
|
44 |
+
from pandas.core.ops import invalid_comparison
|
45 |
+
from pandas.core.strings.object_array import ObjectStringArrayMixin
|
46 |
+
|
47 |
+
if not pa_version_under10p1:
|
48 |
+
import pyarrow as pa
|
49 |
+
import pyarrow.compute as pc
|
50 |
+
|
51 |
+
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
|
52 |
+
|
53 |
+
|
54 |
+
if TYPE_CHECKING:
|
55 |
+
from collections.abc import Sequence
|
56 |
+
|
57 |
+
from pandas._typing import (
|
58 |
+
ArrayLike,
|
59 |
+
AxisInt,
|
60 |
+
Dtype,
|
61 |
+
Scalar,
|
62 |
+
npt,
|
63 |
+
)
|
64 |
+
|
65 |
+
from pandas import Series
|
66 |
+
|
67 |
+
|
68 |
+
ArrowStringScalarOrNAT = Union[str, libmissing.NAType]
|
69 |
+
|
70 |
+
|
71 |
+
def _chk_pyarrow_available() -> None:
|
72 |
+
if pa_version_under10p1:
|
73 |
+
msg = "pyarrow>=10.0.1 is required for PyArrow backed ArrowExtensionArray."
|
74 |
+
raise ImportError(msg)
|
75 |
+
|
76 |
+
|
77 |
+
# TODO: Inherit directly from BaseStringArrayMethods. Currently we inherit from
|
78 |
+
# ObjectStringArrayMixin because we want to have the object-dtype based methods as
|
79 |
+
# fallback for the ones that pyarrow doesn't yet support
|
80 |
+
|
81 |
+
|
82 |
+
class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringArray):
|
83 |
+
"""
|
84 |
+
Extension array for string data in a ``pyarrow.ChunkedArray``.
|
85 |
+
|
86 |
+
.. warning::
|
87 |
+
|
88 |
+
ArrowStringArray is considered experimental. The implementation and
|
89 |
+
parts of the API may change without warning.
|
90 |
+
|
91 |
+
Parameters
|
92 |
+
----------
|
93 |
+
values : pyarrow.Array or pyarrow.ChunkedArray
|
94 |
+
The array of data.
|
95 |
+
|
96 |
+
Attributes
|
97 |
+
----------
|
98 |
+
None
|
99 |
+
|
100 |
+
Methods
|
101 |
+
-------
|
102 |
+
None
|
103 |
+
|
104 |
+
See Also
|
105 |
+
--------
|
106 |
+
:func:`pandas.array`
|
107 |
+
The recommended function for creating a ArrowStringArray.
|
108 |
+
Series.str
|
109 |
+
The string methods are available on Series backed by
|
110 |
+
a ArrowStringArray.
|
111 |
+
|
112 |
+
Notes
|
113 |
+
-----
|
114 |
+
ArrowStringArray returns a BooleanArray for comparison methods.
|
115 |
+
|
116 |
+
Examples
|
117 |
+
--------
|
118 |
+
>>> pd.array(['This is', 'some text', None, 'data.'], dtype="string[pyarrow]")
|
119 |
+
<ArrowStringArray>
|
120 |
+
['This is', 'some text', <NA>, 'data.']
|
121 |
+
Length: 4, dtype: string
|
122 |
+
"""
|
123 |
+
|
124 |
+
# error: Incompatible types in assignment (expression has type "StringDtype",
|
125 |
+
# base class "ArrowExtensionArray" defined the type as "ArrowDtype")
|
126 |
+
_dtype: StringDtype # type: ignore[assignment]
|
127 |
+
_storage = "pyarrow"
|
128 |
+
|
129 |
+
def __init__(self, values) -> None:
|
130 |
+
_chk_pyarrow_available()
|
131 |
+
if isinstance(values, (pa.Array, pa.ChunkedArray)) and pa.types.is_string(
|
132 |
+
values.type
|
133 |
+
):
|
134 |
+
values = pc.cast(values, pa.large_string())
|
135 |
+
|
136 |
+
super().__init__(values)
|
137 |
+
self._dtype = StringDtype(storage=self._storage)
|
138 |
+
|
139 |
+
if not pa.types.is_large_string(self._pa_array.type) and not (
|
140 |
+
pa.types.is_dictionary(self._pa_array.type)
|
141 |
+
and pa.types.is_large_string(self._pa_array.type.value_type)
|
142 |
+
):
|
143 |
+
raise ValueError(
|
144 |
+
"ArrowStringArray requires a PyArrow (chunked) array of "
|
145 |
+
"large_string type"
|
146 |
+
)
|
147 |
+
|
148 |
+
@classmethod
|
149 |
+
def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar:
|
150 |
+
pa_scalar = super()._box_pa_scalar(value, pa_type)
|
151 |
+
if pa.types.is_string(pa_scalar.type) and pa_type is None:
|
152 |
+
pa_scalar = pc.cast(pa_scalar, pa.large_string())
|
153 |
+
return pa_scalar
|
154 |
+
|
155 |
+
@classmethod
|
156 |
+
def _box_pa_array(
|
157 |
+
cls, value, pa_type: pa.DataType | None = None, copy: bool = False
|
158 |
+
) -> pa.Array | pa.ChunkedArray:
|
159 |
+
pa_array = super()._box_pa_array(value, pa_type)
|
160 |
+
if pa.types.is_string(pa_array.type) and pa_type is None:
|
161 |
+
pa_array = pc.cast(pa_array, pa.large_string())
|
162 |
+
return pa_array
|
163 |
+
|
164 |
+
def __len__(self) -> int:
|
165 |
+
"""
|
166 |
+
Length of this array.
|
167 |
+
|
168 |
+
Returns
|
169 |
+
-------
|
170 |
+
length : int
|
171 |
+
"""
|
172 |
+
return len(self._pa_array)
|
173 |
+
|
174 |
+
@classmethod
|
175 |
+
def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
|
176 |
+
from pandas.core.arrays.masked import BaseMaskedArray
|
177 |
+
|
178 |
+
_chk_pyarrow_available()
|
179 |
+
|
180 |
+
if dtype and not (isinstance(dtype, str) and dtype == "string"):
|
181 |
+
dtype = pandas_dtype(dtype)
|
182 |
+
assert isinstance(dtype, StringDtype) and dtype.storage in (
|
183 |
+
"pyarrow",
|
184 |
+
"pyarrow_numpy",
|
185 |
+
)
|
186 |
+
|
187 |
+
if isinstance(scalars, BaseMaskedArray):
|
188 |
+
# avoid costly conversion to object dtype in ensure_string_array and
|
189 |
+
# numerical issues with Float32Dtype
|
190 |
+
na_values = scalars._mask
|
191 |
+
result = scalars._data
|
192 |
+
result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
|
193 |
+
return cls(pa.array(result, mask=na_values, type=pa.string()))
|
194 |
+
elif isinstance(scalars, (pa.Array, pa.ChunkedArray)):
|
195 |
+
return cls(pc.cast(scalars, pa.string()))
|
196 |
+
|
197 |
+
# convert non-na-likes to str
|
198 |
+
result = lib.ensure_string_array(scalars, copy=copy)
|
199 |
+
return cls(pa.array(result, type=pa.string(), from_pandas=True))
|
200 |
+
|
201 |
+
@classmethod
|
202 |
+
def _from_sequence_of_strings(
|
203 |
+
cls, strings, dtype: Dtype | None = None, copy: bool = False
|
204 |
+
):
|
205 |
+
return cls._from_sequence(strings, dtype=dtype, copy=copy)
|
206 |
+
|
207 |
+
@property
|
208 |
+
def dtype(self) -> StringDtype: # type: ignore[override]
|
209 |
+
"""
|
210 |
+
An instance of 'string[pyarrow]'.
|
211 |
+
"""
|
212 |
+
return self._dtype
|
213 |
+
|
214 |
+
def insert(self, loc: int, item) -> ArrowStringArray:
|
215 |
+
if not isinstance(item, str) and item is not libmissing.NA:
|
216 |
+
raise TypeError("Scalar must be NA or str")
|
217 |
+
return super().insert(loc, item)
|
218 |
+
|
219 |
+
@classmethod
|
220 |
+
def _result_converter(cls, values, na=None):
|
221 |
+
return BooleanDtype().__from_arrow__(values)
|
222 |
+
|
223 |
+
def _maybe_convert_setitem_value(self, value):
|
224 |
+
"""Maybe convert value to be pyarrow compatible."""
|
225 |
+
if is_scalar(value):
|
226 |
+
if isna(value):
|
227 |
+
value = None
|
228 |
+
elif not isinstance(value, str):
|
229 |
+
raise TypeError("Scalar must be NA or str")
|
230 |
+
else:
|
231 |
+
value = np.array(value, dtype=object, copy=True)
|
232 |
+
value[isna(value)] = None
|
233 |
+
for v in value:
|
234 |
+
if not (v is None or isinstance(v, str)):
|
235 |
+
raise TypeError("Scalar must be NA or str")
|
236 |
+
return super()._maybe_convert_setitem_value(value)
|
237 |
+
|
238 |
+
def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
|
239 |
+
value_set = [
|
240 |
+
pa_scalar.as_py()
|
241 |
+
for pa_scalar in [pa.scalar(value, from_pandas=True) for value in values]
|
242 |
+
if pa_scalar.type in (pa.string(), pa.null())
|
243 |
+
]
|
244 |
+
|
245 |
+
# short-circuit to return all False array.
|
246 |
+
if not len(value_set):
|
247 |
+
return np.zeros(len(self), dtype=bool)
|
248 |
+
|
249 |
+
result = pc.is_in(
|
250 |
+
self._pa_array, value_set=pa.array(value_set, type=self._pa_array.type)
|
251 |
+
)
|
252 |
+
# pyarrow 2.0.0 returned nulls, so we explicily specify dtype to convert nulls
|
253 |
+
# to False
|
254 |
+
return np.array(result, dtype=np.bool_)
|
255 |
+
|
256 |
+
def astype(self, dtype, copy: bool = True):
|
257 |
+
dtype = pandas_dtype(dtype)
|
258 |
+
|
259 |
+
if dtype == self.dtype:
|
260 |
+
if copy:
|
261 |
+
return self.copy()
|
262 |
+
return self
|
263 |
+
elif isinstance(dtype, NumericDtype):
|
264 |
+
data = self._pa_array.cast(pa.from_numpy_dtype(dtype.numpy_dtype))
|
265 |
+
return dtype.__from_arrow__(data)
|
266 |
+
elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.floating):
|
267 |
+
return self.to_numpy(dtype=dtype, na_value=np.nan)
|
268 |
+
|
269 |
+
return super().astype(dtype, copy=copy)
|
270 |
+
|
271 |
+
@property
|
272 |
+
def _data(self):
|
273 |
+
# dask accesses ._data directlys
|
274 |
+
warnings.warn(
|
275 |
+
f"{type(self).__name__}._data is a deprecated and will be removed "
|
276 |
+
"in a future version, use ._pa_array instead",
|
277 |
+
FutureWarning,
|
278 |
+
stacklevel=find_stack_level(),
|
279 |
+
)
|
280 |
+
return self._pa_array
|
281 |
+
|
282 |
+
# ------------------------------------------------------------------------
|
283 |
+
# String methods interface
|
284 |
+
|
285 |
+
# error: Incompatible types in assignment (expression has type "NAType",
|
286 |
+
# base class "ObjectStringArrayMixin" defined the type as "float")
|
287 |
+
_str_na_value = libmissing.NA # type: ignore[assignment]
|
288 |
+
|
289 |
+
def _str_map(
|
290 |
+
self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
|
291 |
+
):
|
292 |
+
# TODO: de-duplicate with StringArray method. This method is moreless copy and
|
293 |
+
# paste.
|
294 |
+
|
295 |
+
from pandas.arrays import (
|
296 |
+
BooleanArray,
|
297 |
+
IntegerArray,
|
298 |
+
)
|
299 |
+
|
300 |
+
if dtype is None:
|
301 |
+
dtype = self.dtype
|
302 |
+
if na_value is None:
|
303 |
+
na_value = self.dtype.na_value
|
304 |
+
|
305 |
+
mask = isna(self)
|
306 |
+
arr = np.asarray(self)
|
307 |
+
|
308 |
+
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
|
309 |
+
constructor: type[IntegerArray | BooleanArray]
|
310 |
+
if is_integer_dtype(dtype):
|
311 |
+
constructor = IntegerArray
|
312 |
+
else:
|
313 |
+
constructor = BooleanArray
|
314 |
+
|
315 |
+
na_value_is_na = isna(na_value)
|
316 |
+
if na_value_is_na:
|
317 |
+
na_value = 1
|
318 |
+
result = lib.map_infer_mask(
|
319 |
+
arr,
|
320 |
+
f,
|
321 |
+
mask.view("uint8"),
|
322 |
+
convert=False,
|
323 |
+
na_value=na_value,
|
324 |
+
# error: Argument 1 to "dtype" has incompatible type
|
325 |
+
# "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
|
326 |
+
# "Type[object]"
|
327 |
+
dtype=np.dtype(dtype), # type: ignore[arg-type]
|
328 |
+
)
|
329 |
+
|
330 |
+
if not na_value_is_na:
|
331 |
+
mask[:] = False
|
332 |
+
|
333 |
+
return constructor(result, mask)
|
334 |
+
|
335 |
+
elif is_string_dtype(dtype) and not is_object_dtype(dtype):
|
336 |
+
# i.e. StringDtype
|
337 |
+
result = lib.map_infer_mask(
|
338 |
+
arr, f, mask.view("uint8"), convert=False, na_value=na_value
|
339 |
+
)
|
340 |
+
result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True)
|
341 |
+
return type(self)(result)
|
342 |
+
else:
|
343 |
+
# This is when the result type is object. We reach this when
|
344 |
+
# -> We know the result type is truly object (e.g. .encode returns bytes
|
345 |
+
# or .findall returns a list).
|
346 |
+
# -> We don't know the result type. E.g. `.get` can return anything.
|
347 |
+
return lib.map_infer_mask(arr, f, mask.view("uint8"))
|
348 |
+
|
349 |
+
def _str_contains(
|
350 |
+
self, pat, case: bool = True, flags: int = 0, na=np.nan, regex: bool = True
|
351 |
+
):
|
352 |
+
if flags:
|
353 |
+
fallback_performancewarning()
|
354 |
+
return super()._str_contains(pat, case, flags, na, regex)
|
355 |
+
|
356 |
+
if regex:
|
357 |
+
result = pc.match_substring_regex(self._pa_array, pat, ignore_case=not case)
|
358 |
+
else:
|
359 |
+
result = pc.match_substring(self._pa_array, pat, ignore_case=not case)
|
360 |
+
result = self._result_converter(result, na=na)
|
361 |
+
if not isna(na):
|
362 |
+
result[isna(result)] = bool(na)
|
363 |
+
return result
|
364 |
+
|
365 |
+
def _str_startswith(self, pat: str | tuple[str, ...], na: Scalar | None = None):
|
366 |
+
if isinstance(pat, str):
|
367 |
+
result = pc.starts_with(self._pa_array, pattern=pat)
|
368 |
+
else:
|
369 |
+
if len(pat) == 0:
|
370 |
+
# mimic existing behaviour of string extension array
|
371 |
+
# and python string method
|
372 |
+
result = pa.array(
|
373 |
+
np.zeros(len(self._pa_array), dtype=bool), mask=isna(self._pa_array)
|
374 |
+
)
|
375 |
+
else:
|
376 |
+
result = pc.starts_with(self._pa_array, pattern=pat[0])
|
377 |
+
|
378 |
+
for p in pat[1:]:
|
379 |
+
result = pc.or_(result, pc.starts_with(self._pa_array, pattern=p))
|
380 |
+
if not isna(na):
|
381 |
+
result = result.fill_null(na)
|
382 |
+
return self._result_converter(result)
|
383 |
+
|
384 |
+
def _str_endswith(self, pat: str | tuple[str, ...], na: Scalar | None = None):
|
385 |
+
if isinstance(pat, str):
|
386 |
+
result = pc.ends_with(self._pa_array, pattern=pat)
|
387 |
+
else:
|
388 |
+
if len(pat) == 0:
|
389 |
+
# mimic existing behaviour of string extension array
|
390 |
+
# and python string method
|
391 |
+
result = pa.array(
|
392 |
+
np.zeros(len(self._pa_array), dtype=bool), mask=isna(self._pa_array)
|
393 |
+
)
|
394 |
+
else:
|
395 |
+
result = pc.ends_with(self._pa_array, pattern=pat[0])
|
396 |
+
|
397 |
+
for p in pat[1:]:
|
398 |
+
result = pc.or_(result, pc.ends_with(self._pa_array, pattern=p))
|
399 |
+
if not isna(na):
|
400 |
+
result = result.fill_null(na)
|
401 |
+
return self._result_converter(result)
|
402 |
+
|
403 |
+
def _str_replace(
|
404 |
+
self,
|
405 |
+
pat: str | re.Pattern,
|
406 |
+
repl: str | Callable,
|
407 |
+
n: int = -1,
|
408 |
+
case: bool = True,
|
409 |
+
flags: int = 0,
|
410 |
+
regex: bool = True,
|
411 |
+
):
|
412 |
+
if isinstance(pat, re.Pattern) or callable(repl) or not case or flags:
|
413 |
+
fallback_performancewarning()
|
414 |
+
return super()._str_replace(pat, repl, n, case, flags, regex)
|
415 |
+
|
416 |
+
func = pc.replace_substring_regex if regex else pc.replace_substring
|
417 |
+
result = func(self._pa_array, pattern=pat, replacement=repl, max_replacements=n)
|
418 |
+
return type(self)(result)
|
419 |
+
|
420 |
+
def _str_repeat(self, repeats: int | Sequence[int]):
|
421 |
+
if not isinstance(repeats, int):
|
422 |
+
return super()._str_repeat(repeats)
|
423 |
+
else:
|
424 |
+
return type(self)(pc.binary_repeat(self._pa_array, repeats))
|
425 |
+
|
426 |
+
def _str_match(
|
427 |
+
self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
|
428 |
+
):
|
429 |
+
if not pat.startswith("^"):
|
430 |
+
pat = f"^{pat}"
|
431 |
+
return self._str_contains(pat, case, flags, na, regex=True)
|
432 |
+
|
433 |
+
def _str_fullmatch(
|
434 |
+
self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None
|
435 |
+
):
|
436 |
+
if not pat.endswith("$") or pat.endswith("\\$"):
|
437 |
+
pat = f"{pat}$"
|
438 |
+
return self._str_match(pat, case, flags, na)
|
439 |
+
|
440 |
+
def _str_slice(
|
441 |
+
self, start: int | None = None, stop: int | None = None, step: int | None = None
|
442 |
+
):
|
443 |
+
if stop is None:
|
444 |
+
return super()._str_slice(start, stop, step)
|
445 |
+
if start is None:
|
446 |
+
start = 0
|
447 |
+
if step is None:
|
448 |
+
step = 1
|
449 |
+
return type(self)(
|
450 |
+
pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step)
|
451 |
+
)
|
452 |
+
|
453 |
+
def _str_isalnum(self):
|
454 |
+
result = pc.utf8_is_alnum(self._pa_array)
|
455 |
+
return self._result_converter(result)
|
456 |
+
|
457 |
+
def _str_isalpha(self):
|
458 |
+
result = pc.utf8_is_alpha(self._pa_array)
|
459 |
+
return self._result_converter(result)
|
460 |
+
|
461 |
+
def _str_isdecimal(self):
|
462 |
+
result = pc.utf8_is_decimal(self._pa_array)
|
463 |
+
return self._result_converter(result)
|
464 |
+
|
465 |
+
def _str_isdigit(self):
|
466 |
+
result = pc.utf8_is_digit(self._pa_array)
|
467 |
+
return self._result_converter(result)
|
468 |
+
|
469 |
+
def _str_islower(self):
|
470 |
+
result = pc.utf8_is_lower(self._pa_array)
|
471 |
+
return self._result_converter(result)
|
472 |
+
|
473 |
+
def _str_isnumeric(self):
|
474 |
+
result = pc.utf8_is_numeric(self._pa_array)
|
475 |
+
return self._result_converter(result)
|
476 |
+
|
477 |
+
def _str_isspace(self):
|
478 |
+
result = pc.utf8_is_space(self._pa_array)
|
479 |
+
return self._result_converter(result)
|
480 |
+
|
481 |
+
def _str_istitle(self):
|
482 |
+
result = pc.utf8_is_title(self._pa_array)
|
483 |
+
return self._result_converter(result)
|
484 |
+
|
485 |
+
def _str_isupper(self):
|
486 |
+
result = pc.utf8_is_upper(self._pa_array)
|
487 |
+
return self._result_converter(result)
|
488 |
+
|
489 |
+
def _str_len(self):
|
490 |
+
result = pc.utf8_length(self._pa_array)
|
491 |
+
return self._convert_int_dtype(result)
|
492 |
+
|
493 |
+
def _str_lower(self):
|
494 |
+
return type(self)(pc.utf8_lower(self._pa_array))
|
495 |
+
|
496 |
+
def _str_upper(self):
|
497 |
+
return type(self)(pc.utf8_upper(self._pa_array))
|
498 |
+
|
499 |
+
def _str_strip(self, to_strip=None):
|
500 |
+
if to_strip is None:
|
501 |
+
result = pc.utf8_trim_whitespace(self._pa_array)
|
502 |
+
else:
|
503 |
+
result = pc.utf8_trim(self._pa_array, characters=to_strip)
|
504 |
+
return type(self)(result)
|
505 |
+
|
506 |
+
def _str_lstrip(self, to_strip=None):
|
507 |
+
if to_strip is None:
|
508 |
+
result = pc.utf8_ltrim_whitespace(self._pa_array)
|
509 |
+
else:
|
510 |
+
result = pc.utf8_ltrim(self._pa_array, characters=to_strip)
|
511 |
+
return type(self)(result)
|
512 |
+
|
513 |
+
def _str_rstrip(self, to_strip=None):
|
514 |
+
if to_strip is None:
|
515 |
+
result = pc.utf8_rtrim_whitespace(self._pa_array)
|
516 |
+
else:
|
517 |
+
result = pc.utf8_rtrim(self._pa_array, characters=to_strip)
|
518 |
+
return type(self)(result)
|
519 |
+
|
520 |
+
def _str_removeprefix(self, prefix: str):
|
521 |
+
if not pa_version_under13p0:
|
522 |
+
starts_with = pc.starts_with(self._pa_array, pattern=prefix)
|
523 |
+
removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix))
|
524 |
+
result = pc.if_else(starts_with, removed, self._pa_array)
|
525 |
+
return type(self)(result)
|
526 |
+
return super()._str_removeprefix(prefix)
|
527 |
+
|
528 |
+
def _str_removesuffix(self, suffix: str):
|
529 |
+
ends_with = pc.ends_with(self._pa_array, pattern=suffix)
|
530 |
+
removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix))
|
531 |
+
result = pc.if_else(ends_with, removed, self._pa_array)
|
532 |
+
return type(self)(result)
|
533 |
+
|
534 |
+
def _str_count(self, pat: str, flags: int = 0):
|
535 |
+
if flags:
|
536 |
+
return super()._str_count(pat, flags)
|
537 |
+
result = pc.count_substring_regex(self._pa_array, pat)
|
538 |
+
return self._convert_int_dtype(result)
|
539 |
+
|
540 |
+
def _str_find(self, sub: str, start: int = 0, end: int | None = None):
|
541 |
+
if start != 0 and end is not None:
|
542 |
+
slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end)
|
543 |
+
result = pc.find_substring(slices, sub)
|
544 |
+
not_found = pc.equal(result, -1)
|
545 |
+
offset_result = pc.add(result, end - start)
|
546 |
+
result = pc.if_else(not_found, result, offset_result)
|
547 |
+
elif start == 0 and end is None:
|
548 |
+
slices = self._pa_array
|
549 |
+
result = pc.find_substring(slices, sub)
|
550 |
+
else:
|
551 |
+
return super()._str_find(sub, start, end)
|
552 |
+
return self._convert_int_dtype(result)
|
553 |
+
|
554 |
+
def _str_get_dummies(self, sep: str = "|"):
|
555 |
+
dummies_pa, labels = ArrowExtensionArray(self._pa_array)._str_get_dummies(sep)
|
556 |
+
if len(labels) == 0:
|
557 |
+
return np.empty(shape=(0, 0), dtype=np.int64), labels
|
558 |
+
dummies = np.vstack(dummies_pa.to_numpy())
|
559 |
+
return dummies.astype(np.int64, copy=False), labels
|
560 |
+
|
561 |
+
def _convert_int_dtype(self, result):
|
562 |
+
return Int64Dtype().__from_arrow__(result)
|
563 |
+
|
564 |
+
def _reduce(
|
565 |
+
self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
|
566 |
+
):
|
567 |
+
result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs)
|
568 |
+
if name in ("argmin", "argmax") and isinstance(result, pa.Array):
|
569 |
+
return self._convert_int_dtype(result)
|
570 |
+
        elif isinstance(result, pa.Array):
            return type(self)(result)
        else:
            return result

    def _rank(
        self,
        *,
        axis: AxisInt = 0,
        method: str = "average",
        na_option: str = "keep",
        ascending: bool = True,
        pct: bool = False,
    ):
        """
        See Series.rank.__doc__.
        """
        return self._convert_int_dtype(
            self._rank_calc(
                axis=axis,
                method=method,
                na_option=na_option,
                ascending=ascending,
                pct=pct,
            )
        )


class ArrowStringArrayNumpySemantics(ArrowStringArray):
    _storage = "pyarrow_numpy"

    @classmethod
    def _result_converter(cls, values, na=None):
        if not isna(na):
            values = values.fill_null(bool(na))
        return ArrowExtensionArray(values).to_numpy(na_value=np.nan)

    def __getattribute__(self, item):
        # ArrowStringArray and we both inherit from ArrowExtensionArray, which
        # creates inheritance problems (Diamond inheritance)
        if item in ArrowStringArrayMixin.__dict__ and item not in (
            "_pa_array",
            "__dict__",
        ):
            return partial(getattr(ArrowStringArrayMixin, item), self)
        return super().__getattribute__(item)

    def _str_map(
        self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
    ):
        if dtype is None:
            dtype = self.dtype
        if na_value is None:
            na_value = self.dtype.na_value

        mask = isna(self)
        arr = np.asarray(self)

        if is_integer_dtype(dtype) or is_bool_dtype(dtype):
            if is_integer_dtype(dtype):
                na_value = np.nan
            else:
                na_value = False
            try:
                result = lib.map_infer_mask(
                    arr,
                    f,
                    mask.view("uint8"),
                    convert=False,
                    na_value=na_value,
                    dtype=np.dtype(dtype),  # type: ignore[arg-type]
                )
                return result

            except ValueError:
                result = lib.map_infer_mask(
                    arr,
                    f,
                    mask.view("uint8"),
                    convert=False,
                    na_value=na_value,
                )
                if convert and result.dtype == object:
                    result = lib.maybe_convert_objects(result)
                return result

        elif is_string_dtype(dtype) and not is_object_dtype(dtype):
            # i.e. StringDtype
            result = lib.map_infer_mask(
                arr, f, mask.view("uint8"), convert=False, na_value=na_value
            )
            result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True)
            return type(self)(result)
        else:
            # This is when the result type is object. We reach this when
            # -> We know the result type is truly object (e.g. .encode returns bytes
            #    or .findall returns a list).
            # -> We don't know the result type. E.g. `.get` can return anything.
            return lib.map_infer_mask(arr, f, mask.view("uint8"))

    def _convert_int_dtype(self, result):
        if isinstance(result, pa.Array):
            result = result.to_numpy(zero_copy_only=False)
        else:
            result = result.to_numpy()
        if result.dtype == np.int32:
            result = result.astype(np.int64)
        return result

    def _cmp_method(self, other, op):
        try:
            result = super()._cmp_method(other, op)
        except pa.ArrowNotImplementedError:
            return invalid_comparison(self, other, op)
        if op == operator.ne:
            return result.to_numpy(np.bool_, na_value=True)
        else:
            return result.to_numpy(np.bool_, na_value=False)

    def value_counts(self, dropna: bool = True) -> Series:
        from pandas import Series

        result = super().value_counts(dropna)
        return Series(
            result._values.to_numpy(), index=result.index, name=result.name, copy=False
        )

    def _reduce(
        self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
    ):
        if name in ["any", "all"]:
            if not skipna and name == "all":
                nas = pc.invert(pc.is_null(self._pa_array))
                arr = pc.and_kleene(nas, pc.not_equal(self._pa_array, ""))
            else:
                arr = pc.not_equal(self._pa_array, "")
            return ArrowExtensionArray(arr)._reduce(
                name, skipna=skipna, keepdims=keepdims, **kwargs
            )
        else:
            return super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs)

    def insert(self, loc: int, item) -> ArrowStringArrayNumpySemantics:
        if item is np.nan:
            item = libmissing.NA
        return super().insert(loc, item)  # type: ignore[return-value]
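A minimal usage sketch of the "pyarrow_numpy" semantics implemented above; it is not part of the vendored file, and the printed values are what one would expect from pandas >= 2.1 with pyarrow installed.

# Illustrative sketch only (assumes pandas >= 2.1 with pyarrow installed);
# not part of the vendored file above.
import numpy as np
import pandas as pd

s = pd.Series(["a", None, "c"], dtype="string[pyarrow_numpy]")

# _cmp_method above converts Arrow results to plain NumPy bools,
# filling nulls with False (True for the != operator):
print((s == "a").to_numpy())     # expected: [ True False False]

# _str_map / _result_converter surface missing values as np.nan, so
# integer results degrade to float64 when NAs are present:
print(s.str.len().to_numpy())    # expected: [ 1. nan  1.]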
env-llmeval/lib/python3.10/site-packages/pandas/core/arrays/timedeltas.py
ADDED
@@ -0,0 +1,1177 @@
from __future__ import annotations

from datetime import timedelta
import operator
from typing import (
    TYPE_CHECKING,
    cast,
)

import numpy as np

from pandas._libs import (
    lib,
    tslibs,
)
from pandas._libs.tslibs import (
    NaT,
    NaTType,
    Tick,
    Timedelta,
    astype_overflowsafe,
    get_supported_dtype,
    iNaT,
    is_supported_dtype,
    periods_per_second,
)
from pandas._libs.tslibs.conversion import cast_from_unit_vectorized
from pandas._libs.tslibs.fields import (
    get_timedelta_days,
    get_timedelta_field,
)
from pandas._libs.tslibs.timedeltas import (
    array_to_timedelta64,
    floordiv_object_array,
    ints_to_pytimedelta,
    parse_timedelta_unit,
    truediv_object_array,
)
from pandas.compat.numpy import function as nv
from pandas.util._validators import validate_endpoints

from pandas.core.dtypes.common import (
    TD64NS_DTYPE,
    is_float_dtype,
    is_integer_dtype,
    is_object_dtype,
    is_scalar,
    is_string_dtype,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import isna

from pandas.core import (
    nanops,
    roperator,
)
from pandas.core.array_algos import datetimelike_accumulations
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com
from pandas.core.ops.common import unpack_zerodim_and_defer

if TYPE_CHECKING:
    from collections.abc import Iterator

    from pandas._typing import (
        AxisInt,
        DateTimeErrorChoices,
        DtypeObj,
        NpDtype,
        Self,
        npt,
    )

    from pandas import DataFrame

import textwrap


def _field_accessor(name: str, alias: str, docstring: str):
    def f(self) -> np.ndarray:
        values = self.asi8
        if alias == "days":
            result = get_timedelta_days(values, reso=self._creso)
        else:
            # error: Incompatible types in assignment (
            # expression has type "ndarray[Any, dtype[signedinteger[_32Bit]]]",
            # variable has type "ndarray[Any, dtype[signedinteger[_64Bit]]]
            result = get_timedelta_field(values, alias, reso=self._creso)  # type: ignore[assignment]
        if self._hasna:
            result = self._maybe_mask_results(
                result, fill_value=None, convert="float64"
            )

        return result

    f.__name__ = name
    f.__doc__ = f"\n{docstring}\n"
    return property(f)


class TimedeltaArray(dtl.TimelikeOps):
    """
    Pandas ExtensionArray for timedelta data.

    .. warning::

       TimedeltaArray is currently experimental, and its API may change
       without warning. In particular, :attr:`TimedeltaArray.dtype` is
       expected to change to be an instance of an ``ExtensionDtype``
       subclass.

    Parameters
    ----------
    values : array-like
        The timedelta data.

    dtype : numpy.dtype
        Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted.
    freq : Offset, optional
    copy : bool, default False
        Whether to copy the underlying array of data.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> pd.arrays.TimedeltaArray._from_sequence(pd.TimedeltaIndex(['1h', '2h']))
    <TimedeltaArray>
    ['0 days 01:00:00', '0 days 02:00:00']
    Length: 2, dtype: timedelta64[ns]
    """

    _typ = "timedeltaarray"
    _internal_fill_value = np.timedelta64("NaT", "ns")
    _recognized_scalars = (timedelta, np.timedelta64, Tick)
    _is_recognized_dtype = lambda x: lib.is_np_dtype(x, "m")
    _infer_matches = ("timedelta", "timedelta64")

    @property
    def _scalar_type(self) -> type[Timedelta]:
        return Timedelta

    __array_priority__ = 1000
    # define my properties & methods for delegation
    _other_ops: list[str] = []
    _bool_ops: list[str] = []
    _object_ops: list[str] = ["freq"]
    _field_ops: list[str] = ["days", "seconds", "microseconds", "nanoseconds"]
    _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + ["unit"]
    _datetimelike_methods: list[str] = [
        "to_pytimedelta",
        "total_seconds",
        "round",
        "floor",
        "ceil",
        "as_unit",
    ]

    # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
    # operates pointwise.

    def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType:
        y = x.view("i8")
        if y == NaT._value:
            return NaT
        return Timedelta._from_value_and_reso(y, reso=self._creso)

    @property
    # error: Return type "dtype" of "dtype" incompatible with return type
    # "ExtensionDtype" in supertype "ExtensionArray"
    def dtype(self) -> np.dtype[np.timedelta64]:  # type: ignore[override]
        """
        The dtype for the TimedeltaArray.

        .. warning::

           A future version of pandas will change dtype to be an instance
           of a :class:`pandas.api.extensions.ExtensionDtype` subclass,
           not a ``numpy.dtype``.

        Returns
        -------
        numpy.dtype
        """
        return self._ndarray.dtype

    # ----------------------------------------------------------------
    # Constructors

    _freq = None
    _default_dtype = TD64NS_DTYPE  # used in TimeLikeOps.__init__

    @classmethod
    def _validate_dtype(cls, values, dtype):
        # used in TimeLikeOps.__init__
        dtype = _validate_td64_dtype(dtype)
        _validate_td64_dtype(values.dtype)
        if dtype != values.dtype:
            raise ValueError("Values resolution does not match dtype.")
        return dtype

    # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
    @classmethod
    def _simple_new(  # type: ignore[override]
        cls,
        values: npt.NDArray[np.timedelta64],
        freq: Tick | None = None,
        dtype: np.dtype[np.timedelta64] = TD64NS_DTYPE,
    ) -> Self:
        # Require td64 dtype, not unit-less, matching values.dtype
        assert lib.is_np_dtype(dtype, "m")
        assert not tslibs.is_unitless(dtype)
        assert isinstance(values, np.ndarray), type(values)
        assert dtype == values.dtype
        assert freq is None or isinstance(freq, Tick)

        result = super()._simple_new(values=values, dtype=dtype)
        result._freq = freq
        return result

    @classmethod
    def _from_sequence(cls, data, *, dtype=None, copy: bool = False) -> Self:
        if dtype:
            dtype = _validate_td64_dtype(dtype)

        data, freq = sequence_to_td64ns(data, copy=copy, unit=None)

        if dtype is not None:
            data = astype_overflowsafe(data, dtype=dtype, copy=False)

        return cls._simple_new(data, dtype=data.dtype, freq=freq)

    @classmethod
    def _from_sequence_not_strict(
        cls,
        data,
        *,
        dtype=None,
        copy: bool = False,
        freq=lib.no_default,
        unit=None,
    ) -> Self:
        """
        _from_sequence_not_strict but without responsibility for finding the
        result's `freq`.
        """
        if dtype:
            dtype = _validate_td64_dtype(dtype)

        assert unit not in ["Y", "y", "M"]  # caller is responsible for checking

        data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)

        if dtype is not None:
            data = astype_overflowsafe(data, dtype=dtype, copy=False)

        result = cls._simple_new(data, dtype=data.dtype, freq=inferred_freq)

        result._maybe_pin_freq(freq, {})
        return result

    @classmethod
    def _generate_range(
        cls, start, end, periods, freq, closed=None, *, unit: str | None = None
    ) -> Self:
        periods = dtl.validate_periods(periods)
        if freq is None and any(x is None for x in [periods, start, end]):
            raise ValueError("Must provide freq argument if no data is supplied")

        if com.count_not_none(start, end, periods, freq) != 3:
            raise ValueError(
                "Of the four parameters: start, end, periods, "
                "and freq, exactly three must be specified"
            )

        if start is not None:
            start = Timedelta(start).as_unit("ns")

        if end is not None:
            end = Timedelta(end).as_unit("ns")

        if unit is not None:
            if unit not in ["s", "ms", "us", "ns"]:
                raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'")
        else:
            unit = "ns"

        if start is not None and unit is not None:
            start = start.as_unit(unit, round_ok=False)
        if end is not None and unit is not None:
            end = end.as_unit(unit, round_ok=False)

        left_closed, right_closed = validate_endpoints(closed)

        if freq is not None:
            index = generate_regular_range(start, end, periods, freq, unit=unit)
        else:
            index = np.linspace(start._value, end._value, periods).astype("i8")

        if not left_closed:
            index = index[1:]
        if not right_closed:
            index = index[:-1]

        td64values = index.view(f"m8[{unit}]")
        return cls._simple_new(td64values, dtype=td64values.dtype, freq=freq)

    # ----------------------------------------------------------------
    # DatetimeLike Interface

    def _unbox_scalar(self, value) -> np.timedelta64:
        if not isinstance(value, self._scalar_type) and value is not NaT:
            raise ValueError("'value' should be a Timedelta.")
        self._check_compatible_with(value)
        if value is NaT:
            return np.timedelta64(value._value, self.unit)
        else:
            return value.as_unit(self.unit).asm8

    def _scalar_from_string(self, value) -> Timedelta | NaTType:
        return Timedelta(value)

    def _check_compatible_with(self, other) -> None:
        # we don't have anything to validate.
        pass

    # ----------------------------------------------------------------
    # Array-Like / EA-Interface Methods

    def astype(self, dtype, copy: bool = True):
        # We handle
        #   --> timedelta64[ns]
        #   --> timedelta64
        # DatetimeLikeArrayMixin super call handles other cases
        dtype = pandas_dtype(dtype)

        if lib.is_np_dtype(dtype, "m"):
            if dtype == self.dtype:
                if copy:
                    return self.copy()
                return self

            if is_supported_dtype(dtype):
                # unit conversion e.g. timedelta64[s]
                res_values = astype_overflowsafe(self._ndarray, dtype, copy=False)
                return type(self)._simple_new(
                    res_values, dtype=res_values.dtype, freq=self.freq
                )
            else:
                raise ValueError(
                    f"Cannot convert from {self.dtype} to {dtype}. "
                    "Supported resolutions are 's', 'ms', 'us', 'ns'"
                )

        return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy)

    def __iter__(self) -> Iterator:
        if self.ndim > 1:
            for i in range(len(self)):
                yield self[i]
        else:
            # convert in chunks of 10k for efficiency
            data = self._ndarray
            length = len(self)
            chunksize = 10000
            chunks = (length // chunksize) + 1
            for i in range(chunks):
                start_i = i * chunksize
                end_i = min((i + 1) * chunksize, length)
                converted = ints_to_pytimedelta(data[start_i:end_i], box=True)
                yield from converted

    # ----------------------------------------------------------------
    # Reductions

    def sum(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        keepdims: bool = False,
        initial=None,
        skipna: bool = True,
        min_count: int = 0,
    ):
        nv.validate_sum(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims, "initial": initial}
        )

        result = nanops.nansum(
            self._ndarray, axis=axis, skipna=skipna, min_count=min_count
        )
        return self._wrap_reduction_result(axis, result)

    def std(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
        )

        result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
        if axis is None or self.ndim == 1:
            return self._box_func(result)
        return self._from_backing_data(result)

    # ----------------------------------------------------------------
    # Accumulations

    def _accumulate(self, name: str, *, skipna: bool = True, **kwargs):
        if name == "cumsum":
            op = getattr(datetimelike_accumulations, name)
            result = op(self._ndarray.copy(), skipna=skipna, **kwargs)

            return type(self)._simple_new(result, freq=None, dtype=self.dtype)
        elif name == "cumprod":
            raise TypeError("cumprod not supported for Timedelta.")

        else:
            return super()._accumulate(name, skipna=skipna, **kwargs)

    # ----------------------------------------------------------------
    # Rendering Methods

    def _formatter(self, boxed: bool = False):
        from pandas.io.formats.format import get_format_timedelta64

        return get_format_timedelta64(self, box=True)

    def _format_native_types(
        self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
    ) -> npt.NDArray[np.object_]:
        from pandas.io.formats.format import get_format_timedelta64

        # Relies on TimeDelta._repr_base
        formatter = get_format_timedelta64(self, na_rep)
        # equiv: np.array([formatter(x) for x in self._ndarray])
        #  but independent of dimension
        return np.frompyfunc(formatter, 1, 1)(self._ndarray)

    # ----------------------------------------------------------------
    # Arithmetic Methods

    def _add_offset(self, other):
        assert not isinstance(other, Tick)
        raise TypeError(
            f"cannot add the type {type(other).__name__} to a {type(self).__name__}"
        )

    @unpack_zerodim_and_defer("__mul__")
    def __mul__(self, other) -> Self:
        if is_scalar(other):
            # numpy will accept float and int, raise TypeError for others
            result = self._ndarray * other
            freq = None
            if self.freq is not None and not isna(other):
                freq = self.freq * other
                if freq.n == 0:
                    # GH#51575 Better to have no freq than an incorrect one
                    freq = None
            return type(self)._simple_new(result, dtype=result.dtype, freq=freq)

        if not hasattr(other, "dtype"):
            # list, tuple
            other = np.array(other)
        if len(other) != len(self) and not lib.is_np_dtype(other.dtype, "m"):
            # Exclude timedelta64 here so we correctly raise TypeError
            #  for that instead of ValueError
            raise ValueError("Cannot multiply with unequal lengths")

        if is_object_dtype(other.dtype):
            # this multiplication will succeed only if all elements of other
            #  are int or float scalars, so we will end up with
            #  timedelta64[ns]-dtyped result
            arr = self._ndarray
            result = [arr[n] * other[n] for n in range(len(self))]
            result = np.array(result)
            return type(self)._simple_new(result, dtype=result.dtype)

        # numpy will accept float or int dtype, raise TypeError for others
        result = self._ndarray * other
        return type(self)._simple_new(result, dtype=result.dtype)

    __rmul__ = __mul__

    def _scalar_divlike_op(self, other, op):
        """
        Shared logic for __truediv__, __rtruediv__, __floordiv__, __rfloordiv__
        with scalar 'other'.
        """
        if isinstance(other, self._recognized_scalars):
            other = Timedelta(other)
            # mypy assumes that __new__ returns an instance of the class
            # github.com/python/mypy/issues/1020
            if cast("Timedelta | NaTType", other) is NaT:
                # specifically timedelta64-NaT
                res = np.empty(self.shape, dtype=np.float64)
                res.fill(np.nan)
                return res

            # otherwise, dispatch to Timedelta implementation
            return op(self._ndarray, other)

        else:
            # caller is responsible for checking lib.is_scalar(other)
            # assume other is numeric, otherwise numpy will raise

            if op in [roperator.rtruediv, roperator.rfloordiv]:
                raise TypeError(
                    f"Cannot divide {type(other).__name__} by {type(self).__name__}"
                )

            result = op(self._ndarray, other)
            freq = None

            if self.freq is not None:
                # Note: freq gets division, not floor-division, even if op
                #  is floordiv.
                freq = self.freq / other
                if freq.nanos == 0 and self.freq.nanos != 0:
                    # e.g. if self.freq is Nano(1) then dividing by 2
                    #  rounds down to zero
                    freq = None

            return type(self)._simple_new(result, dtype=result.dtype, freq=freq)

    def _cast_divlike_op(self, other):
        if not hasattr(other, "dtype"):
            # e.g. list, tuple
            other = np.array(other)

        if len(other) != len(self):
            raise ValueError("Cannot divide vectors with unequal lengths")
        return other

    def _vector_divlike_op(self, other, op) -> np.ndarray | Self:
        """
        Shared logic for __truediv__, __floordiv__, and their reversed versions
        with timedelta64-dtype ndarray other.
        """
        # Let numpy handle it
        result = op(self._ndarray, np.asarray(other))

        if (is_integer_dtype(other.dtype) or is_float_dtype(other.dtype)) and op in [
            operator.truediv,
            operator.floordiv,
        ]:
            return type(self)._simple_new(result, dtype=result.dtype)

        if op in [operator.floordiv, roperator.rfloordiv]:
            mask = self.isna() | isna(other)
            if mask.any():
                result = result.astype(np.float64)
                np.putmask(result, mask, np.nan)

        return result

    @unpack_zerodim_and_defer("__truediv__")
    def __truediv__(self, other):
        # timedelta / X is well-defined for timedelta-like or numeric X
        op = operator.truediv
        if is_scalar(other):
            return self._scalar_divlike_op(other, op)

        other = self._cast_divlike_op(other)
        if (
            lib.is_np_dtype(other.dtype, "m")
            or is_integer_dtype(other.dtype)
            or is_float_dtype(other.dtype)
        ):
            return self._vector_divlike_op(other, op)

        if is_object_dtype(other.dtype):
            other = np.asarray(other)
            if self.ndim > 1:
                res_cols = [left / right for left, right in zip(self, other)]
                res_cols2 = [x.reshape(1, -1) for x in res_cols]
                result = np.concatenate(res_cols2, axis=0)
            else:
                result = truediv_object_array(self._ndarray, other)

            return result

        else:
            return NotImplemented

    @unpack_zerodim_and_defer("__rtruediv__")
    def __rtruediv__(self, other):
        # X / timedelta is defined only for timedelta-like X
        op = roperator.rtruediv
        if is_scalar(other):
            return self._scalar_divlike_op(other, op)

        other = self._cast_divlike_op(other)
        if lib.is_np_dtype(other.dtype, "m"):
            return self._vector_divlike_op(other, op)

        elif is_object_dtype(other.dtype):
            # Note: unlike in __truediv__, we do not _need_ to do type
            #  inference on the result.  It does not raise, a numeric array
            #  is returned.  GH#23829
            result_list = [other[n] / self[n] for n in range(len(self))]
            return np.array(result_list)

        else:
            return NotImplemented

    @unpack_zerodim_and_defer("__floordiv__")
    def __floordiv__(self, other):
        op = operator.floordiv
        if is_scalar(other):
            return self._scalar_divlike_op(other, op)

        other = self._cast_divlike_op(other)
        if (
            lib.is_np_dtype(other.dtype, "m")
            or is_integer_dtype(other.dtype)
            or is_float_dtype(other.dtype)
        ):
            return self._vector_divlike_op(other, op)

        elif is_object_dtype(other.dtype):
            other = np.asarray(other)
            if self.ndim > 1:
                res_cols = [left // right for left, right in zip(self, other)]
                res_cols2 = [x.reshape(1, -1) for x in res_cols]
                result = np.concatenate(res_cols2, axis=0)
            else:
                result = floordiv_object_array(self._ndarray, other)

            assert result.dtype == object
            return result

        else:
            return NotImplemented

    @unpack_zerodim_and_defer("__rfloordiv__")
    def __rfloordiv__(self, other):
        op = roperator.rfloordiv
        if is_scalar(other):
            return self._scalar_divlike_op(other, op)

        other = self._cast_divlike_op(other)
        if lib.is_np_dtype(other.dtype, "m"):
            return self._vector_divlike_op(other, op)

        elif is_object_dtype(other.dtype):
            result_list = [other[n] // self[n] for n in range(len(self))]
            result = np.array(result_list)
            return result

        else:
            return NotImplemented

    @unpack_zerodim_and_defer("__mod__")
    def __mod__(self, other):
        # Note: This is a naive implementation, can likely be optimized
        if isinstance(other, self._recognized_scalars):
            other = Timedelta(other)
        return self - (self // other) * other

    @unpack_zerodim_and_defer("__rmod__")
    def __rmod__(self, other):
        # Note: This is a naive implementation, can likely be optimized
        if isinstance(other, self._recognized_scalars):
            other = Timedelta(other)
        return other - (other // self) * self

    @unpack_zerodim_and_defer("__divmod__")
    def __divmod__(self, other):
        # Note: This is a naive implementation, can likely be optimized
        if isinstance(other, self._recognized_scalars):
            other = Timedelta(other)

        res1 = self // other
        res2 = self - res1 * other
        return res1, res2

    @unpack_zerodim_and_defer("__rdivmod__")
    def __rdivmod__(self, other):
        # Note: This is a naive implementation, can likely be optimized
        if isinstance(other, self._recognized_scalars):
            other = Timedelta(other)

        res1 = other // self
        res2 = other - res1 * self
        return res1, res2

    def __neg__(self) -> TimedeltaArray:
        freq = None
        if self.freq is not None:
            freq = -self.freq
        return type(self)._simple_new(-self._ndarray, dtype=self.dtype, freq=freq)

    def __pos__(self) -> TimedeltaArray:
        return type(self)._simple_new(
            self._ndarray.copy(), dtype=self.dtype, freq=self.freq
        )

    def __abs__(self) -> TimedeltaArray:
        # Note: freq is not preserved
        return type(self)._simple_new(np.abs(self._ndarray), dtype=self.dtype)

    # ----------------------------------------------------------------
    # Conversion Methods - Vectorized analogues of Timedelta methods

    def total_seconds(self) -> npt.NDArray[np.float64]:
        """
        Return total duration of each element expressed in seconds.

        This method is available directly on TimedeltaArray, TimedeltaIndex
        and on Series containing timedelta values under the ``.dt`` namespace.

        Returns
        -------
        ndarray, Index or Series
            When the calling object is a TimedeltaArray, the return type
            is ndarray.  When the calling object is a TimedeltaIndex,
            the return type is an Index with a float64 dtype. When the calling object
            is a Series, the return type is Series of type `float64` whose
            index is the same as the original.

        See Also
        --------
        datetime.timedelta.total_seconds : Standard library version
            of this method.
        TimedeltaIndex.components : Return a DataFrame with components of
            each Timedelta.

        Examples
        --------
        **Series**

        >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
        >>> s
        0   0 days
        1   1 days
        2   2 days
        3   3 days
        4   4 days
        dtype: timedelta64[ns]

        >>> s.dt.total_seconds()
        0         0.0
        1     86400.0
        2    172800.0
        3    259200.0
        4    345600.0
        dtype: float64

        **TimedeltaIndex**

        >>> idx = pd.to_timedelta(np.arange(5), unit='d')
        >>> idx
        TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
                       dtype='timedelta64[ns]', freq=None)

        >>> idx.total_seconds()
        Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64')
        """
        pps = periods_per_second(self._creso)
        return self._maybe_mask_results(self.asi8 / pps, fill_value=None)

    def to_pytimedelta(self) -> npt.NDArray[np.object_]:
        """
        Return an ndarray of datetime.timedelta objects.

        Returns
        -------
        numpy.ndarray

        Examples
        --------
        >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D')
        >>> tdelta_idx
        TimedeltaIndex(['1 days', '2 days', '3 days'],
                        dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.to_pytimedelta()
        array([datetime.timedelta(days=1), datetime.timedelta(days=2),
               datetime.timedelta(days=3)], dtype=object)
        """
        return ints_to_pytimedelta(self._ndarray)

    days_docstring = textwrap.dedent(
        """Number of days for each element.

    Examples
    --------
    For Series:

    >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='d'))
    >>> ser
    0   1 days
    1   2 days
    2   3 days
    dtype: timedelta64[ns]
    >>> ser.dt.days
    0    1
    1    2
    2    3
    dtype: int64

    For TimedeltaIndex:

    >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])
    >>> tdelta_idx
    TimedeltaIndex(['0 days', '10 days', '20 days'],
                    dtype='timedelta64[ns]', freq=None)
    >>> tdelta_idx.days
    Index([0, 10, 20], dtype='int64')"""
    )
    days = _field_accessor("days", "days", days_docstring)

    seconds_docstring = textwrap.dedent(
        """Number of seconds (>= 0 and less than 1 day) for each element.

    Examples
    --------
    For Series:

    >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='s'))
    >>> ser
    0   0 days 00:00:01
    1   0 days 00:00:02
    2   0 days 00:00:03
    dtype: timedelta64[ns]
    >>> ser.dt.seconds
    0    1
    1    2
    2    3
    dtype: int32

    For TimedeltaIndex:

    >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='s')
    >>> tdelta_idx
    TimedeltaIndex(['0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03'],
                   dtype='timedelta64[ns]', freq=None)
    >>> tdelta_idx.seconds
    Index([1, 2, 3], dtype='int32')"""
    )
    seconds = _field_accessor(
        "seconds",
        "seconds",
        seconds_docstring,
    )

    microseconds_docstring = textwrap.dedent(
        """Number of microseconds (>= 0 and less than 1 second) for each element.

    Examples
    --------
    For Series:

    >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='us'))
    >>> ser
    0   0 days 00:00:00.000001
    1   0 days 00:00:00.000002
    2   0 days 00:00:00.000003
    dtype: timedelta64[ns]
    >>> ser.dt.microseconds
    0    1
    1    2
    2    3
    dtype: int32

    For TimedeltaIndex:

    >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='us')
    >>> tdelta_idx
    TimedeltaIndex(['0 days 00:00:00.000001', '0 days 00:00:00.000002',
                    '0 days 00:00:00.000003'],
                   dtype='timedelta64[ns]', freq=None)
    >>> tdelta_idx.microseconds
    Index([1, 2, 3], dtype='int32')"""
    )
    microseconds = _field_accessor(
        "microseconds",
        "microseconds",
        microseconds_docstring,
    )

    nanoseconds_docstring = textwrap.dedent(
        """Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.

    Examples
    --------
    For Series:

    >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='ns'))
    >>> ser
    0   0 days 00:00:00.000000001
    1   0 days 00:00:00.000000002
    2   0 days 00:00:00.000000003
    dtype: timedelta64[ns]
    >>> ser.dt.nanoseconds
    0    1
    1    2
    2    3
    dtype: int32

    For TimedeltaIndex:

    >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='ns')
    >>> tdelta_idx
    TimedeltaIndex(['0 days 00:00:00.000000001', '0 days 00:00:00.000000002',
                    '0 days 00:00:00.000000003'],
                   dtype='timedelta64[ns]', freq=None)
    >>> tdelta_idx.nanoseconds
    Index([1, 2, 3], dtype='int32')"""
    )
    nanoseconds = _field_accessor(
        "nanoseconds",
        "nanoseconds",
        nanoseconds_docstring,
    )

    @property
    def components(self) -> DataFrame:
        """
        Return a DataFrame of the individual resolution components of the Timedeltas.

        The components (days, hours, minutes, seconds, milliseconds, microseconds,
        nanoseconds) are returned as columns in a DataFrame.

        Returns
        -------
        DataFrame

        Examples
        --------
        >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
        >>> tdelta_idx
        TimedeltaIndex(['1 days 00:03:00.000002042'],
                       dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.components
           days  hours  minutes  seconds  milliseconds  microseconds  nanoseconds
        0     1      0        3        0             0             2           42
        """
        from pandas import DataFrame

        columns = [
            "days",
            "hours",
            "minutes",
            "seconds",
            "milliseconds",
            "microseconds",
            "nanoseconds",
        ]
        hasnans = self._hasna
        if hasnans:

            def f(x):
                if isna(x):
                    return [np.nan] * len(columns)
                return x.components

        else:

            def f(x):
                return x.components

        result = DataFrame([f(x) for x in self], columns=columns)
        if not hasnans:
            result = result.astype("int64")
        return result


# ---------------------------------------------------------------------
# Constructor Helpers


def sequence_to_td64ns(
    data,
    copy: bool = False,
    unit=None,
    errors: DateTimeErrorChoices = "raise",
) -> tuple[np.ndarray, Tick | None]:
    """
    Parameters
    ----------
    data : list-like
    copy : bool, default False
    unit : str, optional
        The timedelta unit to treat integers as multiples of. For numeric
        data this defaults to ``'ns'``.
        Must be un-specified if the data contains a str and ``errors=="raise"``.
    errors : {"raise", "coerce", "ignore"}, default "raise"
        How to handle elements that cannot be converted to timedelta64[ns].
        See ``pandas.to_timedelta`` for details.

    Returns
    -------
    converted : numpy.ndarray
        The sequence converted to a numpy array with dtype ``timedelta64[ns]``.
    inferred_freq : Tick or None
        The inferred frequency of the sequence.

    Raises
    ------
    ValueError : Data cannot be converted to timedelta64[ns].

    Notes
    -----
    Unlike `pandas.to_timedelta`, setting ``errors=ignore`` will not cause
    errors to be ignored; they are caught and subsequently ignored at a
    higher level.
    """
    assert unit not in ["Y", "y", "M"]  # caller is responsible for checking

    inferred_freq = None
    if unit is not None:
        unit = parse_timedelta_unit(unit)

    data, copy = dtl.ensure_arraylike_for_datetimelike(
        data, copy, cls_name="TimedeltaArray"
    )

    if isinstance(data, TimedeltaArray):
        inferred_freq = data.freq

    # Convert whatever we have into timedelta64[ns] dtype
    if data.dtype == object or is_string_dtype(data.dtype):
        # no need to make a copy, need to convert if string-dtyped
        data = _objects_to_td64ns(data, unit=unit, errors=errors)
        copy = False

    elif is_integer_dtype(data.dtype):
        # treat as multiples of the given unit
        data, copy_made = _ints_to_td64ns(data, unit=unit)
        copy = copy and not copy_made

    elif is_float_dtype(data.dtype):
        # cast the unit, multiply base/frac separately
        # to avoid precision issues from float -> int
        if isinstance(data.dtype, ExtensionDtype):
            mask = data._mask
            data = data._data
        else:
            mask = np.isnan(data)

        data = cast_from_unit_vectorized(data, unit or "ns")
        data[mask] = iNaT
        data = data.view("m8[ns]")
        copy = False

    elif lib.is_np_dtype(data.dtype, "m"):
        if not is_supported_dtype(data.dtype):
            # cast to closest supported unit, i.e. s or ns
            new_dtype = get_supported_dtype(data.dtype)
            data = astype_overflowsafe(data, dtype=new_dtype, copy=False)
            copy = False

    else:
        # This includes datetime64-dtype, see GH#23539, GH#29794
        raise TypeError(f"dtype {data.dtype} cannot be converted to timedelta64[ns]")

    if not copy:
        data = np.asarray(data)
    else:
        data = np.array(data, copy=copy)

    assert data.dtype.kind == "m"
    assert data.dtype != "m8"  # i.e. not unit-less

    return data, inferred_freq


def _ints_to_td64ns(data, unit: str = "ns"):
    """
    Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating
    the integers as multiples of the given timedelta unit.

    Parameters
    ----------
    data : numpy.ndarray with integer-dtype
    unit : str, default "ns"
        The timedelta unit to treat integers as multiples of.

    Returns
    -------
    numpy.ndarray : timedelta64[ns] array converted from data
    bool : whether a copy was made
    """
    copy_made = False
    unit = unit if unit is not None else "ns"

    if data.dtype != np.int64:
        # converting to int64 makes a copy, so we can avoid
        # re-copying later
        data = data.astype(np.int64)
        copy_made = True

    if unit != "ns":
        dtype_str = f"timedelta64[{unit}]"
        data = data.view(dtype_str)

        data = astype_overflowsafe(data, dtype=TD64NS_DTYPE)

        # the astype conversion makes a copy, so we can avoid re-copying later
        copy_made = True

    else:
        data = data.view("timedelta64[ns]")

    return data, copy_made


def _objects_to_td64ns(data, unit=None, errors: DateTimeErrorChoices = "raise"):
    """
    Convert an object-dtyped or string-dtyped array into a
    timedelta64[ns]-dtyped array.

    Parameters
    ----------
    data : ndarray or Index
    unit : str, default "ns"
        The timedelta unit to treat integers as multiples of.
        Must not be specified if the data contains a str.
    errors : {"raise", "coerce", "ignore"}, default "raise"
        How to handle elements that cannot be converted to timedelta64[ns].
        See ``pandas.to_timedelta`` for details.

    Returns
    -------
    numpy.ndarray : timedelta64[ns] array converted from data

    Raises
    ------
    ValueError : Data cannot be converted to timedelta64[ns].

    Notes
    -----
    Unlike `pandas.to_timedelta`, setting `errors=ignore` will not cause
    errors to be ignored; they are caught and subsequently ignored at a
    higher level.
    """
    # coerce Index to np.ndarray, converting string-dtype if necessary
    values = np.asarray(data, dtype=np.object_)

    result = array_to_timedelta64(values, unit=unit, errors=errors)
    return result.view("timedelta64[ns]")


def _validate_td64_dtype(dtype) -> DtypeObj:
    dtype = pandas_dtype(dtype)
    if dtype == np.dtype("m8"):
        # no precision disallowed GH#24806
        msg = (
            "Passing in 'timedelta' dtype with no precision is not allowed. "
            "Please pass in 'timedelta64[ns]' instead."
        )
        raise ValueError(msg)

    if not lib.is_np_dtype(dtype, "m"):
        raise ValueError(f"dtype '{dtype}' is invalid, should be np.timedelta64 dtype")
    elif not is_supported_dtype(dtype):
        raise ValueError("Supported timedelta64 resolutions are 's', 'ms', 'us', 'ns'")

    return dtype
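A short, hedged sketch of how the constructor helpers and arithmetic defined above fit together; it leans on private helpers from this file, so it is tied to this vendored pandas version rather than any stable public API.

# Illustrative sketch only; relies on private helpers from the file above,
# so it is tied to this vendored pandas version.
from pandas.core.arrays.timedeltas import TimedeltaArray, sequence_to_td64ns

# sequence_to_td64ns coerces list-likes to a timedelta64[ns] ndarray; the
# inferred freq is only non-None when the input was already a TimedeltaArray.
data, inferred_freq = sequence_to_td64ns(["1h", "2h", "3h"])
print(data.dtype, inferred_freq)   # expected: timedelta64[ns] None

arr = TimedeltaArray._from_sequence(["1h", "2h"])
print(arr.total_seconds())         # expected: [3600. 7200.]
print((arr / 2).dtype)             # scalar division keeps timedelta64[ns]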
env-llmeval/lib/python3.10/site-packages/pandas/core/base.py
ADDED
@@ -0,0 +1,1391 @@
"""
Base and utility classes for pandas objects.
"""

from __future__ import annotations

import textwrap
from typing import (
    TYPE_CHECKING,
    Any,
    Generic,
    Literal,
    cast,
    final,
    overload,
)
import warnings

import numpy as np

from pandas._config import using_copy_on_write

from pandas._libs import lib
from pandas._typing import (
    AxisInt,
    DtypeObj,
    IndexLabel,
    NDFrameT,
    Self,
    Shape,
    npt,
)
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
    cache_readonly,
    doc,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import can_hold_element
from pandas.core.dtypes.common import (
    is_object_dtype,
    is_scalar,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.missing import (
    isna,
    remove_na_arraylike,
)

from pandas.core import (
    algorithms,
    nanops,
    ops,
)
from pandas.core.accessor import DirNamesMixin
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import (
    ensure_wrapped_if_datetimelike,
    extract_array,
)

if TYPE_CHECKING:
    from collections.abc import (
        Hashable,
        Iterator,
    )

    from pandas._typing import (
        DropKeep,
        NumpySorter,
        NumpyValueArrayLike,
        ScalarLike_co,
    )

    from pandas import (
        DataFrame,
        Index,
        Series,
    )


_shared_docs: dict[str, str] = {}
_indexops_doc_kwargs = {
    "klass": "IndexOpsMixin",
    "inplace": "",
    "unique": "IndexOpsMixin",
    "duplicated": "IndexOpsMixin",
}


class PandasObject(DirNamesMixin):
    """
    Baseclass for various pandas objects.
    """

    # results from calls to methods decorated with cache_readonly get added to _cache
    _cache: dict[str, Any]

    @property
    def _constructor(self):
        """
        Class constructor (for this class it's just `__class__`).
        """
        return type(self)

    def __repr__(self) -> str:
        """
        Return a string representation for a particular object.
        """
        # Should be overwritten by base classes
        return object.__repr__(self)

    def _reset_cache(self, key: str | None = None) -> None:
        """
        Reset cached properties. If ``key`` is passed, only clears that key.
        """
        if not hasattr(self, "_cache"):
            return
        if key is None:
            self._cache.clear()
        else:
            self._cache.pop(key, None)

    def __sizeof__(self) -> int:
        """
        Generates the total memory usage for an object that returns
        either a value or Series of values
        """
        memory_usage = getattr(self, "memory_usage", None)
        if memory_usage:
            mem = memory_usage(deep=True)  # pylint: disable=not-callable
            return int(mem if is_scalar(mem) else mem.sum())

        # no memory_usage attribute, so fall back to object's 'sizeof'
        return super().__sizeof__()
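
# Illustrative sketch (not part of the pandas source): how ``_reset_cache``
# pairs with ``cache_readonly``. The decorator memoizes results into
# ``self._cache``; ``_reset_cache`` drops one key (or everything) so the
# property is recomputed on next access. Assumes the internal
# ``cache_readonly`` decorator behaves as it is used in this file.
#
#   >>> class Demo(PandasObject):
#   ...     @cache_readonly
#   ...     def expensive(self):
#   ...         print("computing")
#   ...         return 42
#   >>> d = Demo()
#   >>> d.expensive
#   computing
#   42
#   >>> d.expensive              # served from d._cache, no recompute
#   42
#   >>> d._reset_cache("expensive")
#   >>> d.expensive              # recomputed
#   computing
#   42
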

class NoNewAttributesMixin:
    """
    Mixin which prevents adding new attributes.

    Prevents additional attributes via xxx.attribute = "something" after a
    call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).

    If you really want to add a new attribute at a later time, you need to use
    `object.__setattr__(self, key, value)`.
    """

    def _freeze(self) -> None:
        """
        Prevents setting additional attributes.
        """
        object.__setattr__(self, "__frozen", True)

    # prevent adding any attribute via s.xxx.new_attribute = ...
    def __setattr__(self, key: str, value) -> None:
        # _cache is used by a decorator
        # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
        # because
        # 1.) getattr is false for attributes that raise errors
        # 2.) cls.__dict__ doesn't traverse into base classes
        if getattr(self, "__frozen", False) and not (
            key == "_cache"
            or key in type(self).__dict__
            or getattr(self, key, None) is not None
        ):
            raise AttributeError(f"You cannot add any new attribute '{key}'")
        object.__setattr__(self, key, value)
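
# Illustrative sketch (not part of the pandas source): once ``_freeze`` has
# run, only pre-existing attributes (or ``_cache``) can be assigned.
#
#   >>> class Accessor(NoNewAttributesMixin):
#   ...     def __init__(self):
#   ...         self.existing = 1
#   ...         self._freeze()
#   >>> a = Accessor()
#   >>> a.existing = 2           # fine: getattr(self, key) is not None
#   >>> a.brand_new = 3
#   Traceback (most recent call last):
#       ...
#   AttributeError: You cannot add any new attribute 'brand_new'
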

class SelectionMixin(Generic[NDFrameT]):
    """
    mixin implementing the selection & aggregation interface on a group-like
    object sub-classes need to define: obj, exclusions
    """

    obj: NDFrameT
    _selection: IndexLabel | None = None
    exclusions: frozenset[Hashable]
    _internal_names = ["_cache", "__setstate__"]
    _internal_names_set = set(_internal_names)

    @final
    @property
    def _selection_list(self):
        if not isinstance(
            self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
        ):
            return [self._selection]
        return self._selection

    @cache_readonly
    def _selected_obj(self):
        if self._selection is None or isinstance(self.obj, ABCSeries):
            return self.obj
        else:
            return self.obj[self._selection]

    @final
    @cache_readonly
    def ndim(self) -> int:
        return self._selected_obj.ndim

    @final
    @cache_readonly
    def _obj_with_exclusions(self):
        if isinstance(self.obj, ABCSeries):
            return self.obj

        if self._selection is not None:
            return self.obj._getitem_nocopy(self._selection_list)

        if len(self.exclusions) > 0:
            # equivalent to `self.obj.drop(self.exclusions, axis=1)`
            # but this avoids consolidating and making a copy
            # TODO: following GH#45287 can we now use .drop directly without
            #  making a copy?
            return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True)
        else:
            return self.obj

    def __getitem__(self, key):
        if self._selection is not None:
            raise IndexError(f"Column(s) {self._selection} already selected")

        if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
            if len(self.obj.columns.intersection(key)) != len(set(key)):
                bad_keys = list(set(key).difference(self.obj.columns))
                raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
            return self._gotitem(list(key), ndim=2)

        else:
            if key not in self.obj:
                raise KeyError(f"Column not found: {key}")
            ndim = self.obj[key].ndim
            return self._gotitem(key, ndim=ndim)

    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : str / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        raise AbstractMethodError(self)

    @final
    def _infer_selection(self, key, subset: Series | DataFrame):
        """
        Infer the `selection` to pass to our constructor in _gotitem.
        """
        # Shared by Rolling and Resample
        selection = None
        if subset.ndim == 2 and (
            (lib.is_scalar(key) and key in subset) or lib.is_list_like(key)
        ):
            selection = key
        elif subset.ndim == 1 and lib.is_scalar(key) and key == subset.name:
            selection = key
        return selection

    def aggregate(self, func, *args, **kwargs):
        raise AbstractMethodError(self)

    agg = aggregate
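
# Illustrative sketch (not part of the pandas source): the user-facing face
# of this interface is column selection on group-by-like objects, which
# funnels through ``__getitem__`` into ``_gotitem``.
#
#   >>> df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1, 2, 3]})
#   >>> gb = df.groupby("key")
#   >>> gb["x"]                  # scalar key -> _gotitem("x", ndim=1)
#   >>> gb[["x"]]                # list key   -> _gotitem(["x"], ndim=2)
#   >>> gb["missing"]
#   Traceback (most recent call last):
#       ...
#   KeyError: 'Column not found: missing'
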

class IndexOpsMixin(OpsMixin):
    """
    Common ops mixin to support a unified interface / docs for Series / Index
    """

    # ndarray compatibility
    __array_priority__ = 1000
    _hidden_attrs: frozenset[str] = frozenset(
        ["tolist"]  # tolist is not deprecated, just suppressed in the __dir__
    )

    @property
    def dtype(self) -> DtypeObj:
        # must be defined here as a property for mypy
        raise AbstractMethodError(self)

    @property
    def _values(self) -> ExtensionArray | np.ndarray:
        # must be defined here as a property for mypy
        raise AbstractMethodError(self)

    @final
    def transpose(self, *args, **kwargs) -> Self:
        """
        Return the transpose, which is by definition self.

        Returns
        -------
        %(klass)s
        """
        nv.validate_transpose(args, kwargs)
        return self

    T = property(
        transpose,
        doc="""
        Return the transpose, which is by definition self.

        Examples
        --------
        For Series:

        >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
        >>> s
        0     Ant
        1    Bear
        2     Cow
        dtype: object
        >>> s.T
        0     Ant
        1    Bear
        2     Cow
        dtype: object

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx.T
        Index([1, 2, 3], dtype='int64')
        """,
    )

    @property
    def shape(self) -> Shape:
        """
        Return a tuple of the shape of the underlying data.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.shape
        (3,)
        """
        return self._values.shape

    def __len__(self) -> int:
        # We need this defined here for mypy
        raise AbstractMethodError(self)

    @property
    def ndim(self) -> Literal[1]:
        """
        Number of dimensions of the underlying data, by definition 1.

        Examples
        --------
        >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
        >>> s
        0     Ant
        1    Bear
        2     Cow
        dtype: object
        >>> s.ndim
        1

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')
        >>> idx.ndim
        1
        """
        return 1

    @final
    def item(self):
        """
        Return the first element of the underlying data as a Python scalar.

        Returns
        -------
        scalar
            The first element of Series or Index.

        Raises
        ------
        ValueError
            If the data is not length = 1.

        Examples
        --------
        >>> s = pd.Series([1])
        >>> s.item()
        1

        For an index:

        >>> s = pd.Series([1], index=['a'])
        >>> s.index.item()
        'a'
        """
        if len(self) == 1:
            return next(iter(self))
        raise ValueError("can only convert an array of size 1 to a Python scalar")

    @property
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.

        Examples
        --------
        For Series:

        >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
        >>> s
        0     Ant
        1    Bear
        2     Cow
        dtype: object
        >>> s.nbytes
        24

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')
        >>> idx.nbytes
        24
        """
        return self._values.nbytes

    @property
    def size(self) -> int:
        """
        Return the number of elements in the underlying data.

        Examples
        --------
        For Series:

        >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
        >>> s
        0     Ant
        1    Bear
        2     Cow
        dtype: object
        >>> s.size
        3

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')
        >>> idx.size
        3
        """
        return len(self._values)

    @property
    def array(self) -> ExtensionArray:
        """
        The ExtensionArray of the data backing this Series or Index.

        Returns
        -------
        ExtensionArray
            An ExtensionArray of the values stored within. For extension
            types, this is the actual array. For NumPy native types, this
            is a thin (no copy) wrapper around :class:`numpy.ndarray`.

            ``.array`` differs from ``.values``, which may require converting
            the data to a different form.

        See Also
        --------
        Index.to_numpy : Similar method that always returns a NumPy array.
        Series.to_numpy : Similar method that always returns a NumPy array.

        Notes
        -----
        This table lays out the different array types for each extension
        dtype within pandas.

        ================== =============================
        dtype              array type
        ================== =============================
        category           Categorical
        period             PeriodArray
        interval           IntervalArray
        IntegerNA          IntegerArray
        string             StringArray
        boolean            BooleanArray
        datetime64[ns, tz] DatetimeArray
        ================== =============================

        For any 3rd-party extension types, the array type will be an
        ExtensionArray.

        For all remaining dtypes ``.array`` will be a
        :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
        stored within. If you absolutely need a NumPy array (possibly with
        copying / coercing data), then use :meth:`Series.to_numpy` instead.

        Examples
        --------
        For regular NumPy types like int, and float, a NumpyExtensionArray
        is returned.

        >>> pd.Series([1, 2, 3]).array
        <NumpyExtensionArray>
        [1, 2, 3]
        Length: 3, dtype: int64

        For extension types, like Categorical, the actual ExtensionArray
        is returned

        >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
        >>> ser.array
        ['a', 'b', 'a']
        Categories (2, object): ['a', 'b']
        """
        raise AbstractMethodError(self)

    @final
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
        **kwargs,
    ) -> np.ndarray:
        """
        A NumPy ndarray representing the values in this Series or Index.

        Parameters
        ----------
        dtype : str or numpy.dtype, optional
            The dtype to pass to :meth:`numpy.asarray`.
        copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary.
        na_value : Any, optional
            The value to use for missing values. The default value depends
            on `dtype` and the type of the array.
        **kwargs
            Additional keywords passed through to the ``to_numpy`` method
            of the underlying array (for extension arrays).

        Returns
        -------
        numpy.ndarray

        See Also
        --------
        Series.array : Get the actual data stored within.
        Index.array : Get the actual data stored within.
        DataFrame.to_numpy : Similar method for DataFrame.

        Notes
        -----
        The returned array will be the same up to equality (values equal
        in `self` will be equal in the returned array; likewise for values
        that are not equal). When `self` contains an ExtensionArray, the
        dtype may be different. For example, for a category-dtype Series,
        ``to_numpy()`` will return a NumPy array and the categorical dtype
        will be lost.

        For NumPy dtypes, this will be a reference to the actual data stored
        in this Series or Index (assuming ``copy=False``). Modifying the result
        in place will modify the data stored in the Series or Index (not that
        we recommend doing that).

        For extension types, ``to_numpy()`` *may* require copying data and
        coercing the result to a NumPy type (possibly object), which may be
        expensive. When you need a no-copy reference to the underlying data,
        :attr:`Series.array` should be used instead.

        This table lays out the different dtypes and default return types of
        ``to_numpy()`` for various dtypes within pandas.

        ================== ================================
        dtype              array type
        ================== ================================
        category[T]        ndarray[T] (same dtype as input)
        period             ndarray[object] (Periods)
        interval           ndarray[object] (Intervals)
        IntegerNA          ndarray[object]
        datetime64[ns]     datetime64[ns]
        datetime64[ns, tz] ndarray[object] (Timestamps)
        ================== ================================

        Examples
        --------
        >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
        >>> ser.to_numpy()
        array(['a', 'b', 'a'], dtype=object)

        Specify the `dtype` to control how datetime-aware data is represented.
        Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
        objects, each with the correct ``tz``.

        >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
        >>> ser.to_numpy(dtype=object)
        array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
               Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
              dtype=object)

        Or ``dtype='datetime64[ns]'`` to return an ndarray of native
        datetime64 values. The values are converted to UTC and the timezone
        info is dropped.

        >>> ser.to_numpy(dtype="datetime64[ns]")
        ... # doctest: +ELLIPSIS
        array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
              dtype='datetime64[ns]')
        """
        if isinstance(self.dtype, ExtensionDtype):
            return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs)
        elif kwargs:
            bad_keys = next(iter(kwargs.keys()))
            raise TypeError(
                f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
            )

        fillna = (
            na_value is not lib.no_default
            # no need to fillna with np.nan if we already have a float dtype
            and not (na_value is np.nan and np.issubdtype(self.dtype, np.floating))
        )

        values = self._values
        if fillna:
            if not can_hold_element(values, na_value):
                # if we can't hold the na_value asarray either makes a copy or we
                # error before modifying values. The asarray later on thus won't make
                # another copy
                values = np.asarray(values, dtype=dtype)
            else:
                values = values.copy()

            values[np.asanyarray(isna(self))] = na_value

        result = np.asarray(values, dtype=dtype)

        if (copy and not fillna) or (not copy and using_copy_on_write()):
            if np.shares_memory(self._values[:2], result[:2]):
                # Take slices to improve performance of check
                if using_copy_on_write() and not copy:
                    result = result.view()
                    result.flags.writeable = False
                else:
                    result = result.copy()

        return result
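
    # Illustrative sketch (not part of the pandas source): ``na_value`` fills
    # missing entries on the way out (on a copy), so the source is untouched,
    # and ``copy=True`` forces a fresh ndarray even when a view would suffice.
    #
    #   >>> s = pd.Series([1.0, np.nan, 3.0])
    #   >>> s.to_numpy(na_value=0.0)
    #   array([1., 0., 3.])
    #   >>> s                        # source Series is unchanged
    #   0    1.0
    #   1    NaN
    #   2    3.0
    #   dtype: float64
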

    @final
    @property
    def empty(self) -> bool:
        return not self.size

    @doc(op="max", oppose="min", value="largest")
    def argmax(
        self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
    ) -> int:
        """
        Return int position of the {value} value in the Series.

        If the {op}imum is achieved in multiple locations,
        the first row position is returned.

        Parameters
        ----------
        axis : {{None}}
            Unused. Parameter needed for compatibility with DataFrame.
        skipna : bool, default True
            Exclude NA/null values when showing the result.
        *args, **kwargs
            Additional arguments and keywords for compatibility with NumPy.

        Returns
        -------
        int
            Row position of the {op}imum value.

        See Also
        --------
        Series.arg{op} : Return position of the {op}imum value.
        Series.arg{oppose} : Return position of the {oppose}imum value.
        numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
        Series.idxmax : Return index label of the maximum values.
        Series.idxmin : Return index label of the minimum values.

        Examples
        --------
        Consider dataset containing cereal calories

        >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
        ...                'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
        >>> s
        Corn Flakes              100.0
        Almond Delight           110.0
        Cinnamon Toast Crunch    120.0
        Cocoa Puff               110.0
        dtype: float64

        >>> s.argmax()
        2
        >>> s.argmin()
        0

        The maximum cereal calories is the third element and
        the minimum cereal calories is the first element,
        since series is zero-indexed.
        """
        delegate = self._values
        nv.validate_minmax_axis(axis)
        skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)

        if isinstance(delegate, ExtensionArray):
            if not skipna and delegate.isna().any():
                warnings.warn(
                    f"The behavior of {type(self).__name__}.argmax/argmin "
                    "with skipna=False and NAs, or with all-NAs is deprecated. "
                    "In a future version this will raise ValueError.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                return -1
            else:
                return delegate.argmax()
        else:
            result = nanops.nanargmax(delegate, skipna=skipna)
            if result == -1:
                warnings.warn(
                    f"The behavior of {type(self).__name__}.argmax/argmin "
                    "with skipna=False and NAs, or with all-NAs is deprecated. "
                    "In a future version this will raise ValueError.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            # error: Incompatible return value type (got "Union[int, ndarray]", expected
            # "int")
            return result  # type: ignore[return-value]

    @doc(argmax, op="min", oppose="max", value="smallest")
    def argmin(
        self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
    ) -> int:
        delegate = self._values
        nv.validate_minmax_axis(axis)
        skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)

        if isinstance(delegate, ExtensionArray):
            if not skipna and delegate.isna().any():
                warnings.warn(
                    f"The behavior of {type(self).__name__}.argmax/argmin "
                    "with skipna=False and NAs, or with all-NAs is deprecated. "
                    "In a future version this will raise ValueError.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                return -1
            else:
                return delegate.argmin()
        else:
            result = nanops.nanargmin(delegate, skipna=skipna)
            if result == -1:
                warnings.warn(
                    f"The behavior of {type(self).__name__}.argmax/argmin "
                    "with skipna=False and NAs, or with all-NAs is deprecated. "
                    "In a future version this will raise ValueError.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            # error: Incompatible return value type (got "Union[int, ndarray]", expected
            # "int")
            return result  # type: ignore[return-value]
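
    # Illustrative sketch (not part of the pandas source): with NAs present,
    # ``skipna=True`` ignores them, while ``skipna=False`` currently warns and
    # returns -1 (slated to raise ValueError per the deprecation above).
    #
    #   >>> s = pd.Series([3.0, np.nan, 1.0, 5.0])
    #   >>> s.argmax()
    #   3
    #   >>> s.argmin()
    #   2
    #   >>> s.argmin(skipna=False)   # FutureWarning
    #   -1
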

    def tolist(self):
        """
        Return a list of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)

        Returns
        -------
        list

        See Also
        --------
        numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
            nested list of Python scalars.

        Examples
        --------
        For Series

        >>> s = pd.Series([1, 2, 3])
        >>> s.to_list()
        [1, 2, 3]

        For Index:

        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')

        >>> idx.to_list()
        [1, 2, 3]
        """
        return self._values.tolist()

    to_list = tolist

    def __iter__(self) -> Iterator:
        """
        Return an iterator of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)

        Returns
        -------
        iterator

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> for x in s:
        ...     print(x)
        1
        2
        3
        """
        # We are explicitly making element iterators.
        if not isinstance(self._values, np.ndarray):
            # Check type instead of dtype to catch DTA/TDA
            return iter(self._values)
        else:
            return map(self._values.item, range(self._values.size))

    @cache_readonly
    def hasnans(self) -> bool:
        """
        Return True if there are any NaNs.

        Enables various performance speedups.

        Returns
        -------
        bool

        Examples
        --------
        >>> s = pd.Series([1, 2, 3, None])
        >>> s
        0    1.0
        1    2.0
        2    3.0
        3    NaN
        dtype: float64
        >>> s.hasnans
        True
        """
        # error: Item "bool" of "Union[bool, ndarray[Any, dtype[bool_]], NDFrame]"
        # has no attribute "any"
        return bool(isna(self).any())  # type: ignore[union-attr]

    @final
    def _map_values(self, mapper, na_action=None, convert: bool = True):
        """
        An internal function that maps values using the input
        correspondence (which can be a dict, Series, or function).

        Parameters
        ----------
        mapper : function, dict, or Series
            The input correspondence object
        na_action : {None, 'ignore'}
            If 'ignore', propagate NA values, without passing them to the
            mapping function
        convert : bool, default True
            Try to find better dtype for elementwise function results. If
            False, leave as dtype=object. Note that the dtype is always
            preserved for some extension array dtypes, such as Categorical.

        Returns
        -------
        Union[Index, MultiIndex], inferred
            The output of the mapping function applied to the index.
            If the function returns a tuple with more than one element
            a MultiIndex will be returned.
        """
        arr = self._values

        if isinstance(arr, ExtensionArray):
            return arr.map(mapper, na_action=na_action)

        return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert)

    @final
    def value_counts(
        self,
        normalize: bool = False,
        sort: bool = True,
        ascending: bool = False,
        bins=None,
        dropna: bool = True,
    ) -> Series:
        """
        Return a Series containing counts of unique values.

        The resulting object will be in descending order so that the
        first element is the most frequently-occurring element.
        Excludes NA values by default.

        Parameters
        ----------
        normalize : bool, default False
            If True then the object returned will contain the relative
            frequencies of the unique values.
        sort : bool, default True
            Sort by frequencies when True. Preserve the order of the data when False.
        ascending : bool, default False
            Sort in ascending order.
        bins : int, optional
            Rather than count values, group them into half-open bins,
            a convenience for ``pd.cut``, only works with numeric data.
        dropna : bool, default True
            Don't include counts of NaN.

        Returns
        -------
        Series

        See Also
        --------
        Series.count: Number of non-NA elements in a Series.
        DataFrame.count: Number of non-NA elements in a DataFrame.
        DataFrame.value_counts: Equivalent method on DataFrames.

        Examples
        --------
        >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
        >>> index.value_counts()
        3.0    2
        1.0    1
        2.0    1
        4.0    1
        Name: count, dtype: int64

        With `normalize` set to `True`, returns the relative frequency by
        dividing all values by the sum of values.

        >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
        >>> s.value_counts(normalize=True)
        3.0    0.4
        1.0    0.2
        2.0    0.2
        4.0    0.2
        Name: proportion, dtype: float64

        **bins**

        Bins can be useful for going from a continuous variable to a
        categorical variable; instead of counting unique
        apparitions of values, divide the index in the specified
        number of half-open bins.

        >>> s.value_counts(bins=3)
        (0.996, 2.0]    2
        (2.0, 3.0]      2
        (3.0, 4.0]      1
        Name: count, dtype: int64

        **dropna**

        With `dropna` set to `False` we can also see NaN index values.

        >>> s.value_counts(dropna=False)
        3.0    2
        1.0    1
        2.0    1
        4.0    1
        NaN    1
        Name: count, dtype: int64
        """
        return algorithms.value_counts_internal(
            self,
            sort=sort,
            ascending=ascending,
            normalize=normalize,
            bins=bins,
            dropna=dropna,
        )

    def unique(self):
        values = self._values
        if not isinstance(values, np.ndarray):
            # i.e. ExtensionArray
            result = values.unique()
        else:
            result = algorithms.unique1d(values)
        return result

    @final
    def nunique(self, dropna: bool = True) -> int:
        """
        Return number of unique elements in the object.

        Excludes NA values by default.

        Parameters
        ----------
        dropna : bool, default True
            Don't include NaN in the count.

        Returns
        -------
        int

        See Also
        --------
        DataFrame.nunique: Method nunique for DataFrame.
        Series.count: Count non-NA/null observations in the Series.

        Examples
        --------
        >>> s = pd.Series([1, 3, 5, 7, 7])
        >>> s
        0    1
        1    3
        2    5
        3    7
        4    7
        dtype: int64

        >>> s.nunique()
        4
        """
        uniqs = self.unique()
        if dropna:
            uniqs = remove_na_arraylike(uniqs)
        return len(uniqs)

    @property
    def is_unique(self) -> bool:
        """
        Return boolean if values in the object are unique.

        Returns
        -------
        bool

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.is_unique
        True

        >>> s = pd.Series([1, 2, 3, 1])
        >>> s.is_unique
        False
        """
        return self.nunique(dropna=False) == len(self)

    @property
    def is_monotonic_increasing(self) -> bool:
        """
        Return boolean if values in the object are monotonically increasing.

        Returns
        -------
        bool

        Examples
        --------
        >>> s = pd.Series([1, 2, 2])
        >>> s.is_monotonic_increasing
        True

        >>> s = pd.Series([3, 2, 1])
        >>> s.is_monotonic_increasing
        False
        """
        from pandas import Index

        return Index(self).is_monotonic_increasing

    @property
    def is_monotonic_decreasing(self) -> bool:
        """
        Return boolean if values in the object are monotonically decreasing.

        Returns
        -------
        bool

        Examples
        --------
        >>> s = pd.Series([3, 2, 2, 1])
        >>> s.is_monotonic_decreasing
        True

        >>> s = pd.Series([1, 2, 3])
        >>> s.is_monotonic_decreasing
        False
        """
        from pandas import Index

        return Index(self).is_monotonic_decreasing
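
    # Illustrative sketch (not part of the pandas source): ``Series.map`` and
    # ``Index.map`` funnel through ``_map_values`` above. ``na_action="ignore"``
    # keeps NA out of the mapping function, and dict mappers leave unmatched
    # values as NaN.
    #
    #   >>> s = pd.Series(["cat", "dog", np.nan])
    #   >>> s.map(len, na_action="ignore")
    #   0    3.0
    #   1    3.0
    #   2    NaN
    #   dtype: float64
    #   >>> s.map({"cat": "kitten"})
    #   0    kitten
    #   1       NaN
    #   2       NaN
    #   dtype: object
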

    @final
    def _memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of the values.

        Parameters
        ----------
        deep : bool, default False
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption.

        Returns
        -------
        bytes used

        See Also
        --------
        numpy.ndarray.nbytes : Total bytes consumed by the elements of the
            array.

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False or if used on PyPy

        Examples
        --------
        >>> idx = pd.Index([1, 2, 3])
        >>> idx.memory_usage()
        24
        """
        if hasattr(self.array, "memory_usage"):
            return self.array.memory_usage(  # pyright: ignore[reportGeneralTypeIssues]
                deep=deep,
            )

        v = self.array.nbytes
        if deep and is_object_dtype(self.dtype) and not PYPY:
            values = cast(np.ndarray, self._values)
            v += lib.memory_usage_of_objects(values)
        return v

    @doc(
        algorithms.factorize,
        values="",
        order="",
        size_hint="",
        sort=textwrap.dedent(
            """\
            sort : bool, default False
                Sort `uniques` and shuffle `codes` to maintain the
                relationship.
            """
        ),
    )
    def factorize(
        self,
        sort: bool = False,
        use_na_sentinel: bool = True,
    ) -> tuple[npt.NDArray[np.intp], Index]:
        codes, uniques = algorithms.factorize(
            self._values, sort=sort, use_na_sentinel=use_na_sentinel
        )
        if uniques.dtype == np.float16:
            uniques = uniques.astype(np.float32)

        if isinstance(self, ABCIndex):
            # preserve e.g. MultiIndex
            uniques = self._constructor(uniques)
        else:
            from pandas import Index

            uniques = Index(uniques)
        return codes, uniques
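
    # Illustrative sketch (not part of the pandas source): factorize returns
    # integer codes plus an Index of uniques; ``sort=True`` reorders the
    # uniques and remaps the codes to match.
    #
    #   >>> codes, uniques = pd.Series(["b", "a", "b"]).factorize()
    #   >>> codes
    #   array([0, 1, 0])
    #   >>> uniques
    #   Index(['b', 'a'], dtype='object')
    #   >>> codes, uniques = pd.Series(["b", "a", "b"]).factorize(sort=True)
    #   >>> codes
    #   array([1, 0, 1])
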

    _shared_docs[
        "searchsorted"
    ] = """
        Find indices where elements should be inserted to maintain order.

        Find the indices into a sorted {klass} `self` such that, if the
        corresponding elements in `value` were inserted before the indices,
        the order of `self` would be preserved.

        .. note::

            The {klass} *must* be monotonically sorted, otherwise
            wrong locations will likely be returned. Pandas does *not*
            check this for you.

        Parameters
        ----------
        value : array-like or scalar
            Values to insert into `self`.
        side : {{'left', 'right'}}, optional
            If 'left', the index of the first suitable location found is given.
            If 'right', return the last such index. If there is no suitable
            index, return either 0 or N (where N is the length of `self`).
        sorter : 1-D array-like, optional
            Optional array of integer indices that sort `self` into ascending
            order. They are typically the result of ``np.argsort``.

        Returns
        -------
        int or array of int
            A scalar or array of insertion points with the
            same shape as `value`.

        See Also
        --------
        sort_values : Sort by the values along either axis.
        numpy.searchsorted : Similar method from NumPy.

        Notes
        -----
        Binary search is used to find the required insertion points.

        Examples
        --------
        >>> ser = pd.Series([1, 2, 3])
        >>> ser
        0    1
        1    2
        2    3
        dtype: int64

        >>> ser.searchsorted(4)
        3

        >>> ser.searchsorted([0, 4])
        array([0, 3])

        >>> ser.searchsorted([1, 3], side='left')
        array([0, 2])

        >>> ser.searchsorted([1, 3], side='right')
        array([1, 3])

        >>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
        >>> ser
        0   2000-03-11
        1   2000-03-12
        2   2000-03-13
        dtype: datetime64[ns]

        >>> ser.searchsorted('3/14/2000')
        3

        >>> ser = pd.Categorical(
        ...     ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
        ... )
        >>> ser
        ['apple', 'bread', 'bread', 'cheese', 'milk']
        Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']

        >>> ser.searchsorted('bread')
        1

        >>> ser.searchsorted(['bread'], side='right')
        array([3])

        If the values are not monotonically sorted, wrong locations
        may be returned:

        >>> ser = pd.Series([2, 1, 3])
        >>> ser
        0    2
        1    1
        2    3
        dtype: int64

        >>> ser.searchsorted(1)  # doctest: +SKIP
        0  # wrong result, correct would be 1
        """

    # This overload is needed so that the call to searchsorted in
    # pandas.core.resample.TimeGrouper._get_period_bins picks the correct result

    # error: Overloaded function signatures 1 and 2 overlap with incompatible
    # return types
    @overload
    def searchsorted(  # type: ignore[overload-overlap]
        self,
        value: ScalarLike_co,
        side: Literal["left", "right"] = ...,
        sorter: NumpySorter = ...,
    ) -> np.intp:
        ...

    @overload
    def searchsorted(
        self,
        value: npt.ArrayLike | ExtensionArray,
        side: Literal["left", "right"] = ...,
        sorter: NumpySorter = ...,
    ) -> npt.NDArray[np.intp]:
        ...

    @doc(_shared_docs["searchsorted"], klass="Index")
    def searchsorted(
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter | None = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        if isinstance(value, ABCDataFrame):
            msg = (
                "Value must be 1-D array-like or scalar, "
                f"{type(value).__name__} is not supported"
            )
            raise ValueError(msg)

        values = self._values
        if not isinstance(values, np.ndarray):
            # Going through EA.searchsorted directly improves performance GH#38083
            return values.searchsorted(value, side=side, sorter=sorter)

        return algorithms.searchsorted(
            values,
            value,
            side=side,
            sorter=sorter,
        )

    def drop_duplicates(self, *, keep: DropKeep = "first"):
        duplicated = self._duplicated(keep=keep)
        # error: Value of type "IndexOpsMixin" is not indexable
        return self[~duplicated]  # type: ignore[index]

    @final
    def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
        arr = self._values
        if isinstance(arr, ExtensionArray):
            return arr.duplicated(keep=keep)
        return algorithms.duplicated(arr, keep=keep)

    def _arith_method(self, other, op):
        res_name = ops.get_op_result_name(self, other)

        lvalues = self._values
        rvalues = extract_array(other, extract_numpy=True, extract_range=True)
        rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)
        rvalues = ensure_wrapped_if_datetimelike(rvalues)
        if isinstance(rvalues, range):
            rvalues = np.arange(rvalues.start, rvalues.stop, rvalues.step)

        with np.errstate(all="ignore"):
            result = ops.arithmetic_op(lvalues, rvalues, op)

        return self._construct_result(result, name=res_name)

    def _construct_result(self, result, name):
        """
        Construct an appropriately-wrapped result from the ArrayLike result
        of an arithmetic-like operation.
        """
        raise AbstractMethodError(self)
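
# Illustrative sketch (not part of the pandas source): ``drop_duplicates``
# delegates to ``_duplicated`` and keeps the first occurrence by default;
# ``keep=False`` drops every value that appears more than once.
#
#   >>> s = pd.Series([1, 2, 2, 3])
#   >>> s.drop_duplicates()
#   0    1
#   1    2
#   3    3
#   dtype: int64
#   >>> s.drop_duplicates(keep=False)
#   0    1
#   3    3
#   dtype: int64
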
env-llmeval/lib/python3.10/site-packages/pandas/core/common.py
ADDED
@@ -0,0 +1,657 @@
"""
Misc tools for implementing data structures

Note: pandas.core.common is *not* part of the public API.
"""
from __future__ import annotations

import builtins
from collections import (
    abc,
    defaultdict,
)
from collections.abc import (
    Collection,
    Generator,
    Hashable,
    Iterable,
    Sequence,
)
import contextlib
from functools import partial
import inspect
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    cast,
    overload,
)
import warnings

import numpy as np

from pandas._libs import lib
from pandas.compat.numpy import np_version_gte1p24

from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
    is_bool_dtype,
    is_integer,
)
from pandas.core.dtypes.generic import (
    ABCExtensionArray,
    ABCIndex,
    ABCMultiIndex,
    ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string

if TYPE_CHECKING:
    from pandas._typing import (
        AnyArrayLike,
        ArrayLike,
        NpDtype,
        RandomState,
        T,
    )

    from pandas import Index

def flatten(line):
    """
    Flatten an arbitrarily nested sequence.

    Parameters
    ----------
    line : sequence
        The non-string sequence to flatten

    Notes
    -----
    This doesn't treat strings as sequences.

    Returns
    -------
    flattened : generator
    """
    for element in line:
        if iterable_not_string(element):
            yield from flatten(element)
        else:
            yield element


def consensus_name_attr(objs):
    name = objs[0].name
    for obj in objs[1:]:
        try:
            if obj.name != name:
                name = None
        except ValueError:
            name = None
    return name
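
# Illustrative sketch (not part of the pandas source): ``flatten`` recurses
# through nested non-string iterables while strings stay atomic.
#
#   >>> list(flatten([1, [2, (3, 4)], "ab"]))
#   [1, 2, 3, 4, 'ab']
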

def is_bool_indexer(key: Any) -> bool:
    """
    Check whether `key` is a valid boolean indexer.

    Parameters
    ----------
    key : Any
        Only list-likes may be considered boolean indexers.
        All other types are not considered a boolean indexer.
        For array-like input, boolean ndarrays or ExtensionArrays
        with ``_is_boolean`` set are considered boolean indexers.

    Returns
    -------
    bool
        Whether `key` is a valid boolean indexer.

    Raises
    ------
    ValueError
        When the array is an object-dtype ndarray or ExtensionArray
        and contains missing values.

    See Also
    --------
    check_array_indexer : Check that `key` is a valid array to index,
        and convert to an ndarray.
    """
    if isinstance(
        key, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)
    ) and not isinstance(key, ABCMultiIndex):
        if key.dtype == np.object_:
            key_array = np.asarray(key)

            if not lib.is_bool_array(key_array):
                na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
                if lib.is_bool_array(key_array, skipna=True):
                    # Don't raise on e.g. ["A", "B", np.nan], see
                    # test_loc_getitem_list_of_labels_categoricalindex_with_na
                    raise ValueError(na_msg)
                return False
            return True
        elif is_bool_dtype(key.dtype):
            return True
    elif isinstance(key, list):
        # check if np.array(key).dtype would be bool
        if len(key) > 0:
            if type(key) is not list:  # noqa: E721
                # GH#42461 cython will raise TypeError if we pass a subclass
                key = list(key)
            return lib.is_bool_list(key)

    return False


def cast_scalar_indexer(val):
    """
    Disallow indexing with a float key, even if that key is a round number.

    Parameters
    ----------
    val : scalar

    Returns
    -------
    outval : scalar
    """
    # assumes lib.is_scalar(val)
    if lib.is_float(val) and val.is_integer():
        raise IndexError(
            # GH#34193
            "Indexing with a float is no longer supported. Manually convert "
            "to an integer key instead."
        )
    return val
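
# Illustrative sketch (not part of the pandas source): only boolean-typed
# list-likes qualify; object arrays must be all-boolean, and missing values
# among booleans raise rather than silently mask.
#
#   >>> is_bool_indexer(np.array([True, False]))
#   True
#   >>> is_bool_indexer([1, 0])
#   False
#   >>> is_bool_indexer(np.array([True, np.nan], dtype=object))
#   Traceback (most recent call last):
#       ...
#   ValueError: Cannot mask with non-boolean array containing NA / NaN values
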

def not_none(*args):
    """
    Returns a generator consisting of the arguments that are not None.
    """
    return (arg for arg in args if arg is not None)


def any_none(*args) -> bool:
    """
    Returns a boolean indicating if any argument is None.
    """
    return any(arg is None for arg in args)


def all_none(*args) -> bool:
    """
    Returns a boolean indicating if all arguments are None.
    """
    return all(arg is None for arg in args)


def any_not_none(*args) -> bool:
    """
    Returns a boolean indicating if any argument is not None.
    """
    return any(arg is not None for arg in args)


def all_not_none(*args) -> bool:
    """
    Returns a boolean indicating if all arguments are not None.
    """
    return all(arg is not None for arg in args)


def count_not_none(*args) -> int:
    """
    Returns the count of arguments that are not None.
    """
    return sum(x is not None for x in args)
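
# Illustrative sketch (not part of the pandas source): the None-counting
# helpers above in action.
#
#   >>> any_none(1, None)
#   True
#   >>> all_not_none(1, 2, 3)
#   True
#   >>> count_not_none(1, None, "x", None)
#   2
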

@overload
def asarray_tuplesafe(
    values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ...
) -> np.ndarray:
    # ExtensionArray can only be returned when values is an Index, all other iterables
    # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type
    # signature, so instead we special-case some common types.
    ...


@overload
def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
    ...


def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
    if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
        values = list(values)
    elif isinstance(values, ABCIndex):
        return values._values
    elif isinstance(values, ABCSeries):
        return values._values

    if isinstance(values, list) and dtype in [np.object_, object]:
        return construct_1d_object_array_from_listlike(values)

    try:
        with warnings.catch_warnings():
            # Can remove warning filter once NumPy 1.24 is min version
            if not np_version_gte1p24:
                warnings.simplefilter("ignore", np.VisibleDeprecationWarning)
            result = np.asarray(values, dtype=dtype)
    except ValueError:
        # Using try/except since it's more performant than checking is_list_like
        # over each element
        # error: Argument 1 to "construct_1d_object_array_from_listlike"
        # has incompatible type "Iterable[Any]"; expected "Sized"
        return construct_1d_object_array_from_listlike(values)  # type: ignore[arg-type]

    if issubclass(result.dtype.type, str):
        result = np.asarray(values, dtype=object)

    if result.ndim == 2:
        # Avoid building an array of arrays:
        values = [tuple(x) for x in values]
        result = construct_1d_object_array_from_listlike(values)

    return result


def index_labels_to_array(
    labels: np.ndarray | Iterable, dtype: NpDtype | None = None
) -> np.ndarray:
    """
    Transform label or iterable of labels to array, for use in Index.

    Parameters
    ----------
    dtype : dtype
        If specified, use as dtype of the resulting array, otherwise infer.

    Returns
    -------
    array
    """
    if isinstance(labels, (str, tuple)):
        labels = [labels]

    if not isinstance(labels, (list, np.ndarray)):
        try:
            labels = list(labels)
        except TypeError:  # non-iterable
            labels = [labels]

    labels = asarray_tuplesafe(labels, dtype=dtype)

    return labels


def maybe_make_list(obj):
    if obj is not None and not isinstance(obj, (tuple, list)):
        return [obj]
    return obj


def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T:
    """
    If obj is Iterable but not list-like, consume into list.
    """
    if isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized):
        return list(obj)
    obj = cast(Collection, obj)
    return obj
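
# Illustrative sketch (not part of the pandas source): unlike a bare
# ``np.asarray``, tuples survive as scalar elements of a 1-D object array
# instead of being broadcast into a second dimension.
#
#   >>> np.asarray([(1, 2), (3, 4)]).shape
#   (2, 2)
#   >>> asarray_tuplesafe([(1, 2), (3, 4)]).shape
#   (2,)
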

def is_null_slice(obj) -> bool:
    """
    We have a null slice.
    """
    return (
        isinstance(obj, slice)
        and obj.start is None
        and obj.stop is None
        and obj.step is None
    )


def is_empty_slice(obj) -> bool:
    """
    We have an empty slice, e.g. no values are selected.
    """
    return (
        isinstance(obj, slice)
        and obj.start is not None
        and obj.stop is not None
        and obj.start == obj.stop
    )


def is_true_slices(line) -> list[bool]:
    """
    Find non-trivial slices in "line": return a list of booleans with same length.
    """
    return [isinstance(k, slice) and not is_null_slice(k) for k in line]


# TODO: used only once in indexing; belongs elsewhere?
def is_full_slice(obj, line: int) -> bool:
    """
    We have a full length slice.
    """
    return (
        isinstance(obj, slice)
        and obj.start == 0
        and obj.stop == line
        and obj.step is None
    )


def get_callable_name(obj):
    # typical case has name
    if hasattr(obj, "__name__"):
        return getattr(obj, "__name__")
    # some objects don't; could recurse
    if isinstance(obj, partial):
        return get_callable_name(obj.func)
    # fall back to class name
    if callable(obj):
        return type(obj).__name__
    # everything failed (probably because the argument
    # wasn't actually callable); we return None
    # instead of the empty string in this case to allow
    # distinguishing between no name and a name of ''
    return None


def apply_if_callable(maybe_callable, obj, **kwargs):
    """
    Evaluate possibly callable input using obj and kwargs if it is callable,
    otherwise return as it is.

    Parameters
    ----------
    maybe_callable : possibly a callable
    obj : NDFrame
    **kwargs
    """
    if callable(maybe_callable):
        return maybe_callable(obj, **kwargs)

    return maybe_callable
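
# Illustrative sketch (not part of the pandas source): the slice predicates
# above.
#
#   >>> is_null_slice(slice(None))
#   True
#   >>> is_empty_slice(slice(3, 3))
#   True
#   >>> is_full_slice(slice(0, 5), 5)
#   True
#   >>> is_full_slice(slice(0, 4), 5)
#   False
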

def standardize_mapping(into):
    """
    Helper function to standardize a supplied mapping.

    Parameters
    ----------
    into : instance or subclass of collections.abc.Mapping
        Must be a class, an initialized collections.defaultdict,
        or an instance of a collections.abc.Mapping subclass.

    Returns
    -------
    mapping : a collections.abc.Mapping subclass or other constructor
        a callable object that can accept an iterator to create
        the desired Mapping.

    See Also
    --------
    DataFrame.to_dict
    Series.to_dict
    """
    if not inspect.isclass(into):
        if isinstance(into, defaultdict):
            return partial(defaultdict, into.default_factory)
        into = type(into)
    if not issubclass(into, abc.Mapping):
        raise TypeError(f"unsupported type: {into}")
    if into == defaultdict:
        raise TypeError("to_dict() only accepts initialized defaultdicts")
    return into
421 |
+
@overload
|
422 |
+
def random_state(state: np.random.Generator) -> np.random.Generator:
|
423 |
+
...
|
424 |
+
|
425 |
+
|
426 |
+
@overload
|
427 |
+
def random_state(
|
428 |
+
state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None,
|
429 |
+
) -> np.random.RandomState:
|
430 |
+
...
|
431 |
+
|
432 |
+
|
433 |
+
def random_state(state: RandomState | None = None):
|
434 |
+
"""
|
435 |
+
Helper function for processing random_state arguments.
|
436 |
+
|
437 |
+
Parameters
|
438 |
+
----------
|
439 |
+
state : int, array-like, BitGenerator, Generator, np.random.RandomState, None.
|
440 |
+
If receives an int, array-like, or BitGenerator, passes to
|
441 |
+
np.random.RandomState() as seed.
|
442 |
+
If receives an np.random RandomState or Generator, just returns that unchanged.
|
443 |
+
If receives `None`, returns np.random.
|
444 |
+
If receives anything else, raises an informative ValueError.
|
445 |
+
|
446 |
+
Default None.
|
447 |
+
|
448 |
+
Returns
|
449 |
+
-------
|
450 |
+
np.random.RandomState or np.random.Generator. If state is None, returns np.random
|
451 |
+
|
452 |
+
"""
|
453 |
+
if is_integer(state) or isinstance(state, (np.ndarray, np.random.BitGenerator)):
|
454 |
+
return np.random.RandomState(state)
|
455 |
+
elif isinstance(state, np.random.RandomState):
|
456 |
+
return state
|
457 |
+
elif isinstance(state, np.random.Generator):
|
458 |
+
return state
|
459 |
+
elif state is None:
|
460 |
+
return np.random
|
461 |
+
else:
|
462 |
+
raise ValueError(
|
463 |
+
"random_state must be an integer, array-like, a BitGenerator, Generator, "
|
464 |
+
"a numpy RandomState, or None"
|
465 |
+
)
|
466 |
+
|
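
A quick sketch (not part of the file) of how random_state normalizes the seed types pandas accepts; the import from the private pandas.core.common module is an assumption.

import numpy as np

from pandas.core.common import random_state

rs = random_state(42)                    # int seed -> new np.random.RandomState
assert isinstance(rs, np.random.RandomState)

gen = np.random.default_rng(0)
assert random_state(gen) is gen          # a Generator passes through unchanged

assert random_state(None) is np.random   # None -> the global np.random module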

def pipe(
    obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs
) -> T:
    """
    Apply a function ``func`` to object ``obj`` either by passing obj as the
    first argument to the function or, in the case that the func is a tuple,
    interpret the first element of the tuple as a function and pass the obj to
    that function as a keyword argument whose key is the value of the second
    element of the tuple.

    Parameters
    ----------
    func : callable or tuple of (callable, str)
        Function to apply to this object or, alternatively, a
        ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
        string indicating the keyword of ``callable`` that expects the
        object.
    *args : iterable, optional
        Positional arguments passed into ``func``.
    **kwargs : dict, optional
        A dictionary of keyword arguments passed into ``func``.

    Returns
    -------
    object : the return type of ``func``.
    """
    if isinstance(func, tuple):
        func, target = func
        if target in kwargs:
            msg = f"{target} is both the pipe target and a keyword argument"
            raise ValueError(msg)
        kwargs[target] = obj
        return func(*args, **kwargs)
    else:
        return func(obj, *args, **kwargs)
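
A sketch (not part of the file) of the two calling conventions pipe supports, using a toy function in place of a real NDFrame method.

from pandas.core.common import pipe

def scale(data, factor=2):
    return [x * factor for x in data]

# plain callable: obj is passed as the first positional argument
assert pipe([1, 2], scale, factor=3) == [3, 6]

# (callable, keyword) tuple: obj is routed to the named keyword instead
assert pipe([1, 2], (scale, "data"), factor=3) == [3, 6]

# naming the target keyword twice is rejected
try:
    pipe([1, 2], (scale, "data"), data=[9])
except ValueError as err:
    print(err)  # data is both the pipe target and a keyword argument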

def get_rename_function(mapper):
    """
    Returns a function that will map names/labels, dependent if mapper
    is a dict, Series or just a function.
    """

    def f(x):
        if x in mapper:
            return mapper[x]
        else:
            return x

    return f if isinstance(mapper, (abc.Mapping, ABCSeries)) else mapper


def convert_to_list_like(
    values: Hashable | Iterable | AnyArrayLike,
) -> list | AnyArrayLike:
    """
    Convert list-like or scalar input to list-like. List, numpy and pandas array-like
    inputs are returned unmodified whereas others are converted to list.
    """
    if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)):
        return values
    elif isinstance(values, abc.Iterable) and not isinstance(values, str):
        return list(values)

    return [values]


@contextlib.contextmanager
def temp_setattr(
    obj, attr: str, value, condition: bool = True
) -> Generator[None, None, None]:
    """
    Temporarily set attribute on an object.

    Parameters
    ----------
    obj : object
        Object whose attribute will be modified.
    attr : str
        Attribute to modify.
    value : Any
        Value to temporarily set attribute to.
    condition : bool, default True
        Whether to set the attribute. Provided in order to not have to
        conditionally use this context manager.

    Yields
    ------
    object : obj with modified attribute.
    """
    if condition:
        old_value = getattr(obj, attr)
        setattr(obj, attr, value)
    try:
        yield obj
    finally:
        if condition:
            setattr(obj, attr, old_value)


def require_length_match(data, index: Index) -> None:
    """
    Check the length of data matches the length of the index.
    """
    if len(data) != len(index):
        raise ValueError(
            "Length of values "
            f"({len(data)}) "
            "does not match length of index "
            f"({len(index)})"
        )


# the ufuncs np.maximum.reduce and np.minimum.reduce default to axis=0,
# whereas np.min and np.max (which directly call obj.min and obj.max)
# default to axis=None.
_builtin_table = {
    builtins.sum: np.sum,
    builtins.max: np.maximum.reduce,
    builtins.min: np.minimum.reduce,
}

# GH#53425: Only for deprecation
_builtin_table_alias = {
    builtins.sum: "np.sum",
    builtins.max: "np.maximum.reduce",
    builtins.min: "np.minimum.reduce",
}

_cython_table = {
    builtins.sum: "sum",
    builtins.max: "max",
    builtins.min: "min",
    np.all: "all",
    np.any: "any",
    np.sum: "sum",
    np.nansum: "sum",
    np.mean: "mean",
    np.nanmean: "mean",
    np.prod: "prod",
    np.nanprod: "prod",
    np.std: "std",
    np.nanstd: "std",
    np.var: "var",
    np.nanvar: "var",
    np.median: "median",
    np.nanmedian: "median",
    np.max: "max",
    np.nanmax: "max",
    np.min: "min",
    np.nanmin: "min",
    np.cumprod: "cumprod",
    np.nancumprod: "cumprod",
    np.cumsum: "cumsum",
    np.nancumsum: "cumsum",
}


def get_cython_func(arg: Callable) -> str | None:
    """
    if we define an internal function for this argument, return it
    """
    return _cython_table.get(arg)


def is_builtin_func(arg):
    """
    if we define a builtin function for this argument, return it,
    otherwise return the arg
    """
    return _builtin_table.get(arg, arg)


def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:
    """
    If a name is missing then replace it by level_n, where n is the count

    .. versionadded:: 1.4.0

    Parameters
    ----------
    names : list-like
        list of column names or None values.

    Returns
    -------
    list
        list of column names with the None values replaced.
    """
    return [f"level_{i}" if name is None else name for i, name in enumerate(names)]
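
A sketch (not part of the file) of what the lookup tables buy: builtin and numpy reducers passed to pandas aggregation code are translated to internal op names, and missing index names get level_n placeholders.

import builtins

import numpy as np

from pandas.core.common import fill_missing_names, get_cython_func, is_builtin_func

assert get_cython_func(np.nanmean) == "mean"  # dispatches to the cython "mean" op
assert get_cython_func(print) is None         # unknown callables have no mapping

assert is_builtin_func(builtins.sum) is np.sum  # builtins.sum -> np.sum
assert is_builtin_func(abs) is abs              # anything else is returned as-is

assert fill_missing_names(["a", None, "c"]) == ["a", "level_1", "c"]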
env-llmeval/lib/python3.10/site-packages/pandas/core/config_init.py
ADDED
@@ -0,0 +1,924 @@
"""
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. if register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.

If you need to make sure options are available even before a certain
module is imported, register them here rather than in the module.

"""
from __future__ import annotations

import os
from typing import Callable

import pandas._config.config as cf
from pandas._config.config import (
    is_bool,
    is_callable,
    is_instance_factory,
    is_int,
    is_nonnegative_int,
    is_one_of_factory,
    is_str,
    is_text,
)

# compute

use_bottleneck_doc = """
: bool
    Use the bottleneck library to accelerate if it is installed,
    the default is True
    Valid values: False,True
"""


def use_bottleneck_cb(key) -> None:
    from pandas.core import nanops

    nanops.set_use_bottleneck(cf.get_option(key))


use_numexpr_doc = """
: bool
    Use the numexpr library to accelerate computation if it is installed,
    the default is True
    Valid values: False,True
"""


def use_numexpr_cb(key) -> None:
    from pandas.core.computation import expressions

    expressions.set_use_numexpr(cf.get_option(key))


use_numba_doc = """
: bool
    Use the numba engine option for select operations if it is installed,
    the default is False
    Valid values: False,True
"""


def use_numba_cb(key) -> None:
    from pandas.core.util import numba_

    numba_.set_use_numba(cf.get_option(key))


with cf.config_prefix("compute"):
    cf.register_option(
        "use_bottleneck",
        True,
        use_bottleneck_doc,
        validator=is_bool,
        cb=use_bottleneck_cb,
    )
    cf.register_option(
        "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb
    )
    cf.register_option(
        "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb
    )
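
A sketch (not part of the file): the "compute.*" options registered above are driven through the public options API, and each set_option call fires the registered callback.

import pandas as pd

pd.set_option("compute.use_bottleneck", False)  # runs use_bottleneck_cb
pd.set_option("compute.use_numexpr", False)     # runs use_numexpr_cb
print(pd.get_option("compute.use_numba"))       # False by default

# the is_bool validator rejects non-boolean values
try:
    pd.set_option("compute.use_numba", "yes")
except ValueError:
    pass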
#
# options from the "display" namespace

pc_precision_doc = """
: int
    Floating point output precision in terms of number of places after the
    decimal, for regular formatting as well as scientific notation. Similar
    to ``precision`` in :meth:`numpy.set_printoptions`.
"""

pc_colspace_doc = """
: int
    Default space for DataFrame columns.
"""

pc_max_rows_doc = """
: int
    If max_rows is exceeded, switch to truncate view. Depending on
    `large_repr`, objects are either centrally truncated or printed as
    a summary view. 'None' value means unlimited.

    In case python/IPython is running in a terminal and `large_repr`
    equals 'truncate' this can be set to 0 and pandas will auto-detect
    the height of the terminal and print a truncated object which fits
    the screen height. The IPython notebook, IPython qtconsole, or
    IDLE do not run in a terminal and hence it is not possible to do
    correct auto-detection.
"""

pc_min_rows_doc = """
: int
    The numbers of rows to show in a truncated view (when `max_rows` is
    exceeded). Ignored when `max_rows` is set to None or 0. When set to
    None, follows the value of `max_rows`.
"""

pc_max_cols_doc = """
: int
    If max_cols is exceeded, switch to truncate view. Depending on
    `large_repr`, objects are either centrally truncated or printed as
    a summary view. 'None' value means unlimited.

    In case python/IPython is running in a terminal and `large_repr`
    equals 'truncate' this can be set to 0 or None and pandas will auto-detect
    the width of the terminal and print a truncated object which fits
    the screen width. The IPython notebook, IPython qtconsole, or IDLE
    do not run in a terminal and hence it is not possible to do
    correct auto-detection and defaults to 20.
"""

pc_max_categories_doc = """
: int
    This sets the maximum number of categories pandas should output when
    printing out a `Categorical` or a Series of dtype "category".
"""

pc_max_info_cols_doc = """
: int
    max_info_columns is used in DataFrame.info method to decide if
    per column information will be printed.
"""

pc_nb_repr_h_doc = """
: boolean
    When True, IPython notebook will use html representation for
    pandas objects (if it is available).
"""

pc_pprint_nest_depth = """
: int
    Controls the number of nested levels to process when pretty-printing
"""

pc_multi_sparse_doc = """
: boolean
    "sparsify" MultiIndex display (don't display repeated
    elements in outer levels within groups)
"""

float_format_doc = """
: callable
    The callable should accept a floating point number and return
    a string with the desired format of the number. This is used
    in some places like SeriesFormatter.
    See formats.format.EngFormatter for an example.
"""

max_colwidth_doc = """
: int or None
    The maximum width in characters of a column in the repr of
    a pandas data structure. When the column overflows, a "..."
    placeholder is embedded in the output. A 'None' value means unlimited.
"""

colheader_justify_doc = """
: 'left'/'right'
    Controls the justification of column headers. used by DataFrameFormatter.
"""

pc_expand_repr_doc = """
: boolean
    Whether to print out the full DataFrame repr for wide DataFrames across
    multiple lines, `max_columns` is still respected, but the output will
    wrap-around across multiple "pages" if its width exceeds `display.width`.
"""

pc_show_dimensions_doc = """
: boolean or 'truncate'
    Whether to print out dimensions at the end of DataFrame repr.
    If 'truncate' is specified, only print out the dimensions if the
    frame is truncated (e.g. not display all rows and/or columns)
"""

pc_east_asian_width_doc = """
: boolean
    Whether to use the Unicode East Asian Width to calculate the display text
    width.
    Enabling this may affect to the performance (default: False)
"""

pc_ambiguous_as_wide_doc = """
: boolean
    Whether to handle Unicode characters belong to Ambiguous as Wide (width=2)
    (default: False)
"""

pc_table_schema_doc = """
: boolean
    Whether to publish a Table Schema representation for frontends
    that support it.
    (default: False)
"""

pc_html_border_doc = """
: int
    A ``border=value`` attribute is inserted in the ``<table>`` tag
    for the DataFrame HTML repr.
"""

pc_html_use_mathjax_doc = """\
: boolean
    When True, Jupyter notebook will process table contents using MathJax,
    rendering mathematical expressions enclosed by the dollar symbol.
    (default: True)
"""

pc_max_dir_items = """\
: int
    The number of items that will be added to `dir(...)`. 'None' value means
    unlimited. Because dir is cached, changing this option will not immediately
    affect already existing dataframes until a column is deleted or added.

    This is for instance used to suggest columns from a dataframe to tab
    completion.
"""

pc_width_doc = """
: int
    Width of the display in characters. In case python/IPython is running in
    a terminal this can be set to None and pandas will correctly auto-detect
    the width.
    Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
    terminal and hence it is not possible to correctly detect the width.
"""

pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
    will be displayed as exactly 0 by repr and friends.
"""

pc_max_seq_items = """
: int or None
    When pretty-printing a long sequence, no more then `max_seq_items`
    will be printed. If items are omitted, they will be denoted by the
    addition of "..." to the resulting string.

    If set to None, the number of items to be printed is unlimited.
"""

pc_max_info_rows_doc = """
: int
    df.info() will usually show null-counts for each column.
    For large frames this can be quite slow. max_info_rows and max_info_cols
    limit this null check only to frames with smaller dimensions than
    specified.
"""

pc_large_repr_doc = """
: 'truncate'/'info'
    For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
    show a truncated table, or switch to the view from
    df.info() (the behaviour in earlier versions of pandas).
"""

pc_memory_usage_doc = """
: bool, string or None
    This specifies if the memory usage of a DataFrame should be displayed when
    df.info() is called. Valid values True,False,'deep'
"""


def table_schema_cb(key) -> None:
    from pandas.io.formats.printing import enable_data_resource_formatter

    enable_data_resource_formatter(cf.get_option(key))


def is_terminal() -> bool:
    """
    Detect if Python is running in a terminal.

    Returns True if Python is running in a terminal or False if not.
    """
    try:
        # error: Name 'get_ipython' is not defined
        ip = get_ipython()  # type: ignore[name-defined]
    except NameError:  # assume standard Python interpreter in a terminal
        return True
    else:
        if hasattr(ip, "kernel"):  # IPython as a Jupyter kernel
            return False
        else:  # IPython in a terminal
            return True


with cf.config_prefix("display"):
    cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int)
    cf.register_option(
        "float_format",
        None,
        float_format_doc,
        validator=is_one_of_factory([None, is_callable]),
    )
    cf.register_option(
        "max_info_rows",
        1690785,
        pc_max_info_rows_doc,
        validator=is_int,
    )
    cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int)
    cf.register_option(
        "min_rows",
        10,
        pc_min_rows_doc,
        validator=is_instance_factory([type(None), int]),
    )
    cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)

    cf.register_option(
        "max_colwidth",
        50,
        max_colwidth_doc,
        validator=is_nonnegative_int,
    )
    if is_terminal():
        max_cols = 0  # automatically determine optimal number of columns
    else:
        max_cols = 20  # cannot determine optimal number of columns
    cf.register_option(
        "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int
    )
    cf.register_option(
        "large_repr",
        "truncate",
        pc_large_repr_doc,
        validator=is_one_of_factory(["truncate", "info"]),
    )
    cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int)
    cf.register_option(
        "colheader_justify", "right", colheader_justify_doc, validator=is_text
    )
    cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool)
    cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int)
    cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool)
    cf.register_option("expand_frame_repr", True, pc_expand_repr_doc)
    cf.register_option(
        "show_dimensions",
        "truncate",
        pc_show_dimensions_doc,
        validator=is_one_of_factory([True, False, "truncate"]),
    )
    cf.register_option("chop_threshold", None, pc_chop_threshold_doc)
    cf.register_option("max_seq_items", 100, pc_max_seq_items)
    cf.register_option(
        "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int])
    )
    cf.register_option(
        "memory_usage",
        True,
        pc_memory_usage_doc,
        validator=is_one_of_factory([None, True, False, "deep"]),
    )
    cf.register_option(
        "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool
    )
    cf.register_option(
        "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool
    )
    cf.register_option(
        "html.table_schema",
        False,
        pc_table_schema_doc,
        validator=is_bool,
        cb=table_schema_cb,
    )
    cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int)
    cf.register_option(
        "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool
    )
    cf.register_option(
        "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int
    )
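
A sketch (not part of the file) showing a few "display.*" options in use; the doc strings registered above are what describe_option prints back.

import pandas as pd

df = pd.DataFrame({"x": range(100)})

# temporarily truncate the repr to 5 rows
with pd.option_context("display.max_rows", 5, "display.min_rows", 5):
    print(df)

pd.describe_option("display.max_rows")  # prints pc_max_rows_doc from above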
tc_sim_interactive_doc = """
: boolean
    Whether to simulate interactive mode for purposes of testing
"""

with cf.config_prefix("mode"):
    cf.register_option("sim_interactive", False, tc_sim_interactive_doc)

use_inf_as_na_doc = """
: boolean
    True means treat None, NaN, INF, -INF as NA (old way),
    False means None and NaN are null, but INF, -INF are not NA
    (new way).

    This option is deprecated in pandas 2.1.0 and will be removed in 3.0.
"""

# We don't want to start importing everything at the global context level
# or we'll hit circular deps.


def use_inf_as_na_cb(key) -> None:
    # TODO(3.0): enforcing this deprecation will close GH#52501
    from pandas.core.dtypes.missing import _use_inf_as_na

    _use_inf_as_na(key)


with cf.config_prefix("mode"):
    cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb)

cf.deprecate_option(
    # GH#51684
    "mode.use_inf_as_na",
    "use_inf_as_na option is deprecated and will be removed in a future "
    "version. Convert inf values to NaN before operating instead.",
)

data_manager_doc = """
: string
    Internal data manager type; can be "block" or "array". Defaults to "block",
    unless overridden by the 'PANDAS_DATA_MANAGER' environment variable (needs
    to be set before pandas is imported).
"""


with cf.config_prefix("mode"):
    cf.register_option(
        "data_manager",
        # Get the default from an environment variable, if set, otherwise defaults
        # to "block". This environment variable can be set for testing.
        os.environ.get("PANDAS_DATA_MANAGER", "block"),
        data_manager_doc,
        validator=is_one_of_factory(["block", "array"]),
    )

cf.deprecate_option(
    # GH#55043
    "mode.data_manager",
    "data_manager option is deprecated and will be removed in a future "
    "version. Only the BlockManager will be available.",
)


# TODO better name?
copy_on_write_doc = """
: bool
    Use new copy-view behaviour using Copy-on-Write. Defaults to False,
    unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable
    (if set to "1" for True, needs to be set before pandas is imported).
"""


with cf.config_prefix("mode"):
    cf.register_option(
        "copy_on_write",
        # Get the default from an environment variable, if set, otherwise defaults
        # to False. This environment variable can be set for testing.
        "warn"
        if os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "warn"
        else os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1",
        copy_on_write_doc,
        validator=is_one_of_factory([True, False, "warn"]),
    )


# user warnings
chained_assignment = """
: string
    Raise an exception, warn, or no action if trying to use chained assignment,
    The default is warn
"""

with cf.config_prefix("mode"):
    cf.register_option(
        "chained_assignment",
        "warn",
        chained_assignment,
        validator=is_one_of_factory([None, "warn", "raise"]),
    )


string_storage_doc = """
: string
    The default storage for StringDtype. This option is ignored if
    ``future.infer_string`` is set to True.
"""

with cf.config_prefix("mode"):
    cf.register_option(
        "string_storage",
        "python",
        string_storage_doc,
        validator=is_one_of_factory(["python", "pyarrow", "pyarrow_numpy"]),
    )
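
A sketch (not part of the file): as the doc strings above note, the environment-variable defaults are read at import time, so they must be set before pandas is first imported.

import os

os.environ["PANDAS_COPY_ON_WRITE"] = "1"  # must precede `import pandas`

import pandas as pd

print(pd.get_option("mode.copy_on_write"))       # True, taken from the env var
print(pd.get_option("mode.chained_assignment"))  # "warn" by default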

# Set up the io.excel specific reader configuration.
reader_engine_doc = """
: string
    The default Excel reader engine for '{ext}' files. Available options:
    auto, {others}.
"""

_xls_options = ["xlrd", "calamine"]
_xlsm_options = ["xlrd", "openpyxl", "calamine"]
_xlsx_options = ["xlrd", "openpyxl", "calamine"]
_ods_options = ["odf", "calamine"]
_xlsb_options = ["pyxlsb", "calamine"]


with cf.config_prefix("io.excel.xls"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)),
        validator=is_one_of_factory(_xls_options + ["auto"]),
    )

with cf.config_prefix("io.excel.xlsm"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
        validator=is_one_of_factory(_xlsm_options + ["auto"]),
    )


with cf.config_prefix("io.excel.xlsx"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
        validator=is_one_of_factory(_xlsx_options + ["auto"]),
    )


with cf.config_prefix("io.excel.ods"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
        validator=is_one_of_factory(_ods_options + ["auto"]),
    )

with cf.config_prefix("io.excel.xlsb"):
    cf.register_option(
        "reader",
        "auto",
        reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)),
        validator=is_one_of_factory(_xlsb_options + ["auto"]),
    )

# Set up the io.excel specific writer configuration.
writer_engine_doc = """
: string
    The default Excel writer engine for '{ext}' files. Available options:
    auto, {others}.
"""

_xlsm_options = ["openpyxl"]
_xlsx_options = ["openpyxl", "xlsxwriter"]
_ods_options = ["odf"]


with cf.config_prefix("io.excel.xlsm"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
        validator=str,
    )


with cf.config_prefix("io.excel.xlsx"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
        validator=str,
    )


with cf.config_prefix("io.excel.ods"):
    cf.register_option(
        "writer",
        "auto",
        writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
        validator=str,
    )


# Set up the io.parquet specific configuration.
parquet_engine_doc = """
: string
    The default parquet reader/writer engine. Available options:
    'auto', 'pyarrow', 'fastparquet', the default is 'auto'
"""

with cf.config_prefix("io.parquet"):
    cf.register_option(
        "engine",
        "auto",
        parquet_engine_doc,
        validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]),
    )


# Set up the io.sql specific configuration.
sql_engine_doc = """
: string
    The default sql reader/writer engine. Available options:
    'auto', 'sqlalchemy', the default is 'auto'
"""

with cf.config_prefix("io.sql"):
    cf.register_option(
        "engine",
        "auto",
        sql_engine_doc,
        validator=is_one_of_factory(["auto", "sqlalchemy"]),
    )
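
A sketch (not part of the file): cf.config_prefix only shortens names at registration time; callers always use the full dotted key, and the is_one_of_factory validators reject engines that were not registered.

import pandas as pd

pd.set_option("io.parquet.engine", "pyarrow")
print(pd.get_option("io.excel.xlsx.reader"))  # "auto" until overridden

try:
    pd.set_option("io.sql.engine", "duckdb")  # not in ["auto", "sqlalchemy"]
except ValueError:
    pass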
# --------
# Plotting
# ---------

plotting_backend_doc = """
: str
    The plotting backend to use. The default value is "matplotlib", the
    backend provided with pandas. Other backends can be specified by
    providing the name of the module that implements the backend.
"""


def register_plotting_backend_cb(key) -> None:
    if key == "matplotlib":
        # We defer matplotlib validation, since it's the default
        return
    from pandas.plotting._core import _get_plot_backend

    _get_plot_backend(key)


with cf.config_prefix("plotting"):
    cf.register_option(
        "backend",
        defval="matplotlib",
        doc=plotting_backend_doc,
        validator=register_plotting_backend_cb,
    )


register_converter_doc = """
: bool or 'auto'.
    Whether to register converters with matplotlib's units registry for
    dates, times, datetimes, and Periods. Toggling to False will remove
    the converters, restoring any converters that pandas overwrote.
"""


def register_converter_cb(key) -> None:
    from pandas.plotting import (
        deregister_matplotlib_converters,
        register_matplotlib_converters,
    )

    if cf.get_option(key):
        register_matplotlib_converters()
    else:
        deregister_matplotlib_converters()


with cf.config_prefix("plotting.matplotlib"):
    cf.register_option(
        "register_converters",
        "auto",
        register_converter_doc,
        validator=is_one_of_factory(["auto", True, False]),
        cb=register_converter_cb,
    )

# ------
# Styler
# ------

styler_sparse_index_doc = """
: bool
    Whether to sparsify the display of a hierarchical index. Setting to False will
    display each explicit level element in a hierarchical key for each row.
"""

styler_sparse_columns_doc = """
: bool
    Whether to sparsify the display of hierarchical columns. Setting to False will
    display each explicit level element in a hierarchical key for each column.
"""

styler_render_repr = """
: str
    Determine which output to use in Jupyter Notebook in {"html", "latex"}.
"""

styler_max_elements = """
: int
    The maximum number of data-cell (<td>) elements that will be rendered before
    trimming will occur over columns, rows or both if needed.
"""

styler_max_rows = """
: int, optional
    The maximum number of rows that will be rendered. May still be reduced to
    satisfy ``max_elements``, which takes precedence.
"""

styler_max_columns = """
: int, optional
    The maximum number of columns that will be rendered. May still be reduced to
    satisfy ``max_elements``, which takes precedence.
"""

styler_precision = """
: int
    The precision for floats and complex numbers.
"""

styler_decimal = """
: str
    The character representation for the decimal separator for floats and complex.
"""

styler_thousands = """
: str, optional
    The character representation for thousands separator for floats, int and complex.
"""

styler_na_rep = """
: str, optional
    The string representation for values identified as missing.
"""

styler_escape = """
: str, optional
    Whether to escape certain characters according to the given context; html or latex.
"""

styler_formatter = """
: str, callable, dict, optional
    A formatter object to be used as default within ``Styler.format``.
"""

styler_multirow_align = """
: {"c", "t", "b"}
    The specifier for vertical alignment of sparsified LaTeX multirows.
"""

styler_multicol_align = r"""
: {"r", "c", "l", "naive-l", "naive-r"}
    The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe
    decorators can also be added to non-naive values to draw vertical
    rules, e.g. "\|r" will draw a rule on the left side of right aligned merged cells.
"""

styler_hrules = """
: bool
    Whether to add horizontal rules on top and bottom and below the headers.
"""

styler_environment = """
: str
    The environment to replace ``\\begin{table}``. If "longtable" is used results
    in a specific longtable environment format.
"""

styler_encoding = """
: str
    The encoding used for output HTML and LaTeX files.
"""

styler_mathjax = """
: bool
    If False will render special CSS classes to table attributes that indicate Mathjax
    will not be used in Jupyter Notebook.
"""

with cf.config_prefix("styler"):
    cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool)

    cf.register_option(
        "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool
    )

    cf.register_option(
        "render.repr",
        "html",
        styler_render_repr,
        validator=is_one_of_factory(["html", "latex"]),
    )

    cf.register_option(
        "render.max_elements",
        2**18,
        styler_max_elements,
        validator=is_nonnegative_int,
    )

    cf.register_option(
        "render.max_rows",
        None,
        styler_max_rows,
        validator=is_nonnegative_int,
    )

    cf.register_option(
        "render.max_columns",
        None,
        styler_max_columns,
        validator=is_nonnegative_int,
    )

    cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str)

    cf.register_option("format.decimal", ".", styler_decimal, validator=is_str)

    cf.register_option(
        "format.precision", 6, styler_precision, validator=is_nonnegative_int
    )

    cf.register_option(
        "format.thousands",
        None,
        styler_thousands,
        validator=is_instance_factory([type(None), str]),
    )

    cf.register_option(
        "format.na_rep",
        None,
        styler_na_rep,
        validator=is_instance_factory([type(None), str]),
    )

    cf.register_option(
        "format.escape",
        None,
        styler_escape,
        validator=is_one_of_factory([None, "html", "latex", "latex-math"]),
    )

    cf.register_option(
        "format.formatter",
        None,
        styler_formatter,
        validator=is_instance_factory([type(None), dict, Callable, str]),
    )

    cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool)

    cf.register_option(
        "latex.multirow_align",
        "c",
        styler_multirow_align,
        validator=is_one_of_factory(["c", "t", "b", "naive"]),
    )

    val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"]
    val_mca += ["naive-l", "naive-r"]
    cf.register_option(
        "latex.multicol_align",
        "r",
        styler_multicol_align,
        validator=is_one_of_factory(val_mca),
    )

    cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool)

    cf.register_option(
        "latex.environment",
        None,
        styler_environment,
        validator=is_instance_factory([type(None), str]),
    )


with cf.config_prefix("future"):
    cf.register_option(
        "infer_string",
        False,
        "Whether to infer sequence of str objects as pyarrow string "
        "dtype, which will be the default in pandas 3.0 "
        "(at which point this option will be deprecated).",
        validator=is_one_of_factory([True, False]),
    )

    cf.register_option(
        "no_silent_downcasting",
        False,
        "Whether to opt-in to the future behavior which will *not* silently "
        "downcast results from Series and DataFrame `where`, `mask`, and `clip` "
        "methods. "
        "Silent downcasting will be removed in pandas 3.0 "
        "(at which point this option will be deprecated).",
        validator=is_one_of_factory([True, False]),
    )
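
A sketch (not part of the file) of the "styler.*" options feeding into Styler rendering; note that df.style requires the optional jinja2 dependency.

import pandas as pd

df = pd.DataFrame({"a": [1.23456, None]})

with pd.option_context(
    "styler.format.precision", 2,
    "styler.format.na_rep", "MISSING",
):
    html = df.style.to_html()  # requires jinja2

assert "1.23" in html and "MISSING" in html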
env-llmeval/lib/python3.10/site-packages/pandas/core/construction.py
ADDED
@@ -0,0 +1,824 @@
1 |
+
"""
|
2 |
+
Constructor functions intended to be shared by pd.array, Series.__init__,
|
3 |
+
and Index.__new__.
|
4 |
+
|
5 |
+
These should not depend on core.internals.
|
6 |
+
"""
|
7 |
+
from __future__ import annotations
|
8 |
+
|
9 |
+
from collections.abc import Sequence
|
10 |
+
from typing import (
|
11 |
+
TYPE_CHECKING,
|
12 |
+
Optional,
|
13 |
+
Union,
|
14 |
+
cast,
|
15 |
+
overload,
|
16 |
+
)
|
17 |
+
import warnings
|
18 |
+
|
19 |
+
import numpy as np
|
20 |
+
from numpy import ma
|
21 |
+
|
22 |
+
from pandas._config import using_pyarrow_string_dtype
|
23 |
+
|
24 |
+
from pandas._libs import lib
|
25 |
+
from pandas._libs.tslibs import (
|
26 |
+
Period,
|
27 |
+
get_supported_dtype,
|
28 |
+
is_supported_dtype,
|
29 |
+
)
|
30 |
+
from pandas._typing import (
|
31 |
+
AnyArrayLike,
|
32 |
+
ArrayLike,
|
33 |
+
Dtype,
|
34 |
+
DtypeObj,
|
35 |
+
T,
|
36 |
+
)
|
37 |
+
from pandas.util._exceptions import find_stack_level
|
38 |
+
|
39 |
+
from pandas.core.dtypes.base import ExtensionDtype
|
40 |
+
from pandas.core.dtypes.cast import (
|
41 |
+
construct_1d_arraylike_from_scalar,
|
42 |
+
construct_1d_object_array_from_listlike,
|
43 |
+
maybe_cast_to_datetime,
|
44 |
+
maybe_cast_to_integer_array,
|
45 |
+
maybe_convert_platform,
|
46 |
+
maybe_infer_to_datetimelike,
|
47 |
+
maybe_promote,
|
48 |
+
)
|
49 |
+
from pandas.core.dtypes.common import (
|
50 |
+
is_list_like,
|
51 |
+
is_object_dtype,
|
52 |
+
is_string_dtype,
|
53 |
+
pandas_dtype,
|
54 |
+
)
|
55 |
+
from pandas.core.dtypes.dtypes import NumpyEADtype
|
56 |
+
from pandas.core.dtypes.generic import (
|
57 |
+
ABCDataFrame,
|
58 |
+
ABCExtensionArray,
|
59 |
+
ABCIndex,
|
60 |
+
ABCSeries,
|
61 |
+
)
|
62 |
+
from pandas.core.dtypes.missing import isna
|
63 |
+
|
64 |
+
import pandas.core.common as com
|
65 |
+
|
66 |
+
if TYPE_CHECKING:
|
67 |
+
from pandas import (
|
68 |
+
Index,
|
69 |
+
Series,
|
70 |
+
)
|
71 |
+
from pandas.core.arrays.base import ExtensionArray
|
72 |
+
|
73 |
+
|
74 |
+
def array(
|
75 |
+
data: Sequence[object] | AnyArrayLike,
|
76 |
+
dtype: Dtype | None = None,
|
77 |
+
copy: bool = True,
|
78 |
+
) -> ExtensionArray:
|
79 |
+
"""
|
80 |
+
Create an array.
|
81 |
+
|
82 |
+
Parameters
|
83 |
+
----------
|
84 |
+
data : Sequence of objects
|
85 |
+
The scalars inside `data` should be instances of the
|
86 |
+
scalar type for `dtype`. It's expected that `data`
|
87 |
+
represents a 1-dimensional array of data.
|
88 |
+
|
89 |
+
When `data` is an Index or Series, the underlying array
|
90 |
+
will be extracted from `data`.
|
91 |
+
|
92 |
+
dtype : str, np.dtype, or ExtensionDtype, optional
|
93 |
+
The dtype to use for the array. This may be a NumPy
|
94 |
+
dtype or an extension type registered with pandas using
|
95 |
+
:meth:`pandas.api.extensions.register_extension_dtype`.
|
96 |
+
|
97 |
+
If not specified, there are two possibilities:
|
98 |
+
|
99 |
+
1. When `data` is a :class:`Series`, :class:`Index`, or
|
100 |
+
:class:`ExtensionArray`, the `dtype` will be taken
|
101 |
+
from the data.
|
102 |
+
2. Otherwise, pandas will attempt to infer the `dtype`
|
103 |
+
from the data.
|
104 |
+
|
105 |
+
Note that when `data` is a NumPy array, ``data.dtype`` is
|
106 |
+
*not* used for inferring the array type. This is because
|
107 |
+
NumPy cannot represent all the types of data that can be
|
108 |
+
held in extension arrays.
|
109 |
+
|
110 |
+
Currently, pandas will infer an extension dtype for sequences of
|
111 |
+
|
112 |
+
============================== =======================================
|
113 |
+
Scalar Type Array Type
|
114 |
+
============================== =======================================
|
115 |
+
:class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
|
116 |
+
:class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
|
117 |
+
:class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
|
118 |
+
:class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
|
119 |
+
:class:`int` :class:`pandas.arrays.IntegerArray`
|
120 |
+
:class:`float` :class:`pandas.arrays.FloatingArray`
|
121 |
+
:class:`str` :class:`pandas.arrays.StringArray` or
|
122 |
+
:class:`pandas.arrays.ArrowStringArray`
|
123 |
+
:class:`bool` :class:`pandas.arrays.BooleanArray`
|
124 |
+
============================== =======================================
|
125 |
+
|
126 |
+
The ExtensionArray created when the scalar type is :class:`str` is determined by
|
127 |
+
``pd.options.mode.string_storage`` if the dtype is not explicitly given.
|
128 |
+
|
129 |
+
For all other cases, NumPy's usual inference rules will be used.
|
130 |
+
copy : bool, default True
|
131 |
+
Whether to copy the data, even if not necessary. Depending
|
132 |
+
on the type of `data`, creating the new array may require
|
    copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.NumpyExtensionArray` backed by a
    NumPy array.

    >>> pd.array(['a', 'b'], dtype=str)
    <NumpyExtensionArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <NumpyExtensionArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
    rather than a ``NumpyExtensionArray``. This is for symmetry with the case of
    timezone-aware data, which NumPy does not natively support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1h", "2h"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['0 days 01:00:00', '0 days 02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, pandas will infer the best dtype from the values.
    See the description of `dtype` for the types pandas infers for.

    >>> pd.array([1, 2])
    <IntegerArray>
    [1, 2]
    Length: 2, dtype: Int64

    >>> pd.array([1, 2, np.nan])
    <IntegerArray>
    [1, 2, <NA>]
    Length: 3, dtype: Int64

    >>> pd.array([1.1, 2.2])
    <FloatingArray>
    [1.1, 2.2]
    Length: 2, dtype: Float64

    >>> pd.array(["a", None, "c"])
    <StringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> with pd.option_context("string_storage", "pyarrow"):
    ...     arr = pd.array(["a", None, "c"])
    ...
    >>> arr
    <ArrowStringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    ['a', 'b', 'a']
    Categories (2, object): ['a', 'b']

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    ['a', 'b', 'a']
    Categories (3, object): ['a' < 'b' < 'c']

    If pandas does not infer a dedicated extension type a
    :class:`arrays.NumpyExtensionArray` is returned.

    >>> pd.array([1 + 1j, 3 + 2j])
    <NumpyExtensionArray>
    [(1+1j), (3+2j)]
    Length: 2, dtype: complex128

    As mentioned in the "Notes" section, new extension types may be added
    in the future (by pandas or 3rd party libraries), causing the return
    value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the
    `dtype` as a NumPy dtype if you need to ensure there's no future change in
    behavior.

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <NumpyExtensionArray>
    [1, 2]
    Length: 2, dtype: int32

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
    ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        BooleanArray,
        DatetimeArray,
        ExtensionArray,
        FloatingArray,
        IntegerArray,
        IntervalArray,
        NumpyExtensionArray,
        PeriodArray,
        TimedeltaArray,
    )
    from pandas.core.arrays.string_ import StringDtype

    if lib.is_scalar(data):
        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
        raise ValueError(msg)
    elif isinstance(data, ABCDataFrame):
        raise TypeError("Cannot pass DataFrame to 'pandas.array'")

    if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
        # Note: we exclude np.ndarray here, will do type inference on it
        dtype = data.dtype

    data = extract_array(data, extract_numpy=True)

    # this returns None for not-found dtypes.
    if dtype is not None:
        dtype = pandas_dtype(dtype)

    if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype):
        # e.g. TimedeltaArray[s], avoid casting to NumpyExtensionArray
        if copy:
            return data.copy()
        return data

    if isinstance(dtype, ExtensionDtype):
        cls = dtype.construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=True)
        if inferred_dtype == "period":
            period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data)
            return PeriodArray._from_sequence(period_data, copy=copy)

        elif inferred_dtype == "interval":
            return IntervalArray(data, copy=copy)

        elif inferred_dtype.startswith("datetime"):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to NumpyExtensionArray
                pass

        elif inferred_dtype.startswith("timedelta"):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "string":
            # StringArray/ArrowStringArray depending on pd.options.mode.string_storage
            dtype = StringDtype()
            cls = dtype.construct_array_type()
            return cls._from_sequence(data, dtype=dtype, copy=copy)

        elif inferred_dtype == "integer":
            return IntegerArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data):
            return FloatingArray._from_sequence(data, copy=copy)
        elif (
            inferred_dtype in ("floating", "mixed-integer-float")
            and getattr(data, "dtype", None) != np.float16
        ):
            # GH#44715 Exclude np.float16 bc FloatingArray does not support it;
            # we will fall back to NumpyExtensionArray.
            return FloatingArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "boolean":
            return BooleanArray._from_sequence(data, dtype="boolean", copy=copy)

    # Pandas overrides NumPy for
    # 1. datetime64[ns,us,ms,s]
    # 2. timedelta64[ns,us,ms,s]
    # so that a DatetimeArray is returned.
    if lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    if lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

    elif lib.is_np_dtype(dtype, "mM"):
        warnings.warn(
            r"datetime64 and timedelta64 dtype resolutions other than "
            r"'s', 'ms', 'us', and 'ns' are deprecated. "
            r"In future releases passing unsupported resolutions will "
            r"raise an exception.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy)
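
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# The ExtensionArray fast path near the top of pd.array means an input that is
# already an ExtensionArray of the requested dtype is returned unchanged,
# unless ``copy=True`` forces a defensive copy:
#
# >>> existing = pd.array([1, 2], dtype="Int64")
# >>> pd.array(existing) is existing
# True
# >>> pd.array(existing, copy=True) is existing
# False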

_typs = frozenset(
    {
        "index",
        "rangeindex",
        "multiindex",
        "datetimeindex",
        "timedeltaindex",
        "periodindex",
        "categoricalindex",
        "intervalindex",
        "series",
    }
)
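
# --- Editor's illustrative note (not part of the pandas source above) ---
# The strings in ``_typs`` correspond to the ``_typ`` attribute pandas sets on
# Series and the Index subclasses, which extract_array (below) uses as a cheap
# isinstance-style check:
#
# >>> pd.Series(dtype=float)._typ
# 'series'
# >>> pd.RangeIndex(3)._typ
# 'rangeindex'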

@overload
def extract_array(
    obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ...
) -> ArrayLike:
    ...


@overload
def extract_array(
    obj: T, extract_numpy: bool = ..., extract_range: bool = ...
) -> T | ArrayLike:
    ...


def extract_array(
    obj: T, extract_numpy: bool = False, extract_range: bool = False
) -> T | ArrayLike:
    """
    Extract the ndarray or ExtensionArray from a Series or Index.

    For all other types, `obj` is just returned as is.

    Parameters
    ----------
    obj : object
        For Series / Index, the underlying ExtensionArray is unboxed.

    extract_numpy : bool, default False
        Whether to extract the ndarray from a NumpyExtensionArray.

    extract_range : bool, default False
        If we have a RangeIndex, return range._values if True
        (which is a materialized integer ndarray), otherwise return unchanged.

    Returns
    -------
    arr : object

    Examples
    --------
    >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
    ['a', 'b', 'c']
    Categories (3, object): ['a', 'b', 'c']

    Other objects like lists, arrays, and DataFrames are just passed through.

    >>> extract_array([1, 2, 3])
    [1, 2, 3]

    For an ndarray-backed Series / Index the ndarray is returned.

    >>> extract_array(pd.Series([1, 2, 3]))
    array([1, 2, 3])

    To extract all the way down to the ndarray, pass ``extract_numpy=True``.

    >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
    array([1, 2, 3])
    """
    typ = getattr(obj, "_typ", None)
    if typ in _typs:
        # i.e. isinstance(obj, (ABCIndex, ABCSeries))
        if typ == "rangeindex":
            if extract_range:
                # error: "T" has no attribute "_values"
                return obj._values  # type: ignore[attr-defined]
            return obj

        # error: "T" has no attribute "_values"
        return obj._values  # type: ignore[attr-defined]

    elif extract_numpy and typ == "npy_extension":
        # i.e. isinstance(obj, ABCNumpyExtensionArray)
        # error: "T" has no attribute "to_numpy"
        return obj.to_numpy()  # type: ignore[attr-defined]

    return obj

def ensure_wrapped_if_datetimelike(arr):
    """
    Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray.
    """
    if isinstance(arr, np.ndarray):
        if arr.dtype.kind == "M":
            from pandas.core.arrays import DatetimeArray

            dtype = get_supported_dtype(arr.dtype)
            return DatetimeArray._from_sequence(arr, dtype=dtype)

        elif arr.dtype.kind == "m":
            from pandas.core.arrays import TimedeltaArray

            dtype = get_supported_dtype(arr.dtype)
            return TimedeltaArray._from_sequence(arr, dtype=dtype)

    return arr
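
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# ensure_wrapped_if_datetimelike only touches datetime64/timedelta64 ndarrays;
# everything else passes through unchanged:
#
# >>> import numpy as np
# >>> dt = np.array(["2015-01-01", "2016-01-01"], dtype="datetime64[ns]")
# >>> type(ensure_wrapped_if_datetimelike(dt)).__name__
# 'DatetimeArray'
# >>> ensure_wrapped_if_datetimelike(np.array([1, 2]))
# array([1, 2])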

def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray:
    """
    Convert numpy MaskedArray to ensure mask is softened.
    """
    mask = ma.getmaskarray(data)
    if mask.any():
        dtype, fill_value = maybe_promote(data.dtype, np.nan)
        dtype = cast(np.dtype, dtype)
        data = ma.asarray(data.astype(dtype, copy=True))
        data.soften_mask()  # set hardmask False if it was True
        data[mask] = fill_value
    else:
        data = data.copy()
    return data
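
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# A masked int64 array is promoted (via maybe_promote) so the masked slots can
# hold the NaN fill value, hence the float64 result dtype:
#
# >>> import numpy.ma as ma
# >>> marr = ma.array([1, 2, 3], mask=[False, True, False])
# >>> sanitize_masked_array(marr).dtype
# dtype('float64')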

def sanitize_array(
    data,
    index: Index | None,
    dtype: DtypeObj | None = None,
    copy: bool = False,
    *,
    allow_2d: bool = False,
) -> ArrayLike:
    """
    Sanitize input data to an ndarray or ExtensionArray, copy if specified,
    coerce to the dtype if specified.

    Parameters
    ----------
    data : Any
    index : Index or None, default None
    dtype : np.dtype, ExtensionDtype, or None, default None
    copy : bool, default False
    allow_2d : bool, default False
        If False, raise if we have a 2D Arraylike.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    original_dtype = dtype
    if isinstance(data, ma.MaskedArray):
        data = sanitize_masked_array(data)

    if isinstance(dtype, NumpyEADtype):
        # Avoid ending up with a NumpyExtensionArray
        dtype = dtype.numpy_dtype

    object_index = False
    if isinstance(data, ABCIndex) and data.dtype == object and dtype is None:
        object_index = True

    # extract ndarray or ExtensionArray, ensure we have no NumpyExtensionArray
    data = extract_array(data, extract_numpy=True, extract_range=True)

    if isinstance(data, np.ndarray) and data.ndim == 0:
        if dtype is None:
            dtype = data.dtype
        data = lib.item_from_zerodim(data)
    elif isinstance(data, range):
        # GH#16804
        data = range_to_ndarray(data)
        copy = False

    if not is_list_like(data):
        if index is None:
            raise ValueError("index must be specified when data is not list-like")
        if (
            isinstance(data, str)
            and using_pyarrow_string_dtype()
            and original_dtype is None
        ):
            from pandas.core.arrays.string_ import StringDtype

            dtype = StringDtype("pyarrow_numpy")
        data = construct_1d_arraylike_from_scalar(data, len(index), dtype)

        return data

    elif isinstance(data, ABCExtensionArray):
        # it is already ensured above this is not a NumpyExtensionArray
        # Until GH#49309 is fixed this check needs to come before the
        # ExtensionDtype check
        if dtype is not None:
            subarr = data.astype(dtype, copy=copy)
        elif copy:
            subarr = data.copy()
        else:
            subarr = data

    elif isinstance(dtype, ExtensionDtype):
        # create an extension array from its dtype
        _sanitize_non_ordered(data)
        cls = dtype.construct_array_type()
        subarr = cls._from_sequence(data, dtype=dtype, copy=copy)

    # GH#846
    elif isinstance(data, np.ndarray):
        if isinstance(data, np.matrix):
            data = data.A

        if dtype is None:
            subarr = data
            if data.dtype == object:
                subarr = maybe_infer_to_datetimelike(data)
                if (
                    object_index
                    and using_pyarrow_string_dtype()
                    and is_string_dtype(subarr)
                ):
                    # Avoid inference when string option is set
                    subarr = data
            elif data.dtype.kind == "U" and using_pyarrow_string_dtype():
                from pandas.core.arrays.string_ import StringDtype

                dtype = StringDtype(storage="pyarrow_numpy")
                subarr = dtype.construct_array_type()._from_sequence(data, dtype=dtype)

            if subarr is data and copy:
                subarr = subarr.copy()

        else:
            # we will try to copy by-definition here
            subarr = _try_cast(data, dtype, copy)

    elif hasattr(data, "__array__"):
        # e.g. dask array GH#38645
        if not copy:
            data = np.asarray(data)
        else:
            data = np.array(data, copy=copy)
        return sanitize_array(
            data,
            index=index,
            dtype=dtype,
            copy=False,
            allow_2d=allow_2d,
        )

    else:
        _sanitize_non_ordered(data)
        # materialize e.g. generators, convert e.g. tuples, abc.ValueView
        data = list(data)

        if len(data) == 0 and dtype is None:
            # We default to float64, matching numpy
            subarr = np.array([], dtype=np.float64)

        elif dtype is not None:
            subarr = _try_cast(data, dtype, copy)

        else:
            subarr = maybe_convert_platform(data)
            if subarr.dtype == object:
                subarr = cast(np.ndarray, subarr)
                subarr = maybe_infer_to_datetimelike(subarr)

    subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d)

    if isinstance(subarr, np.ndarray):
        # at this point we should have dtype be None or subarr.dtype == dtype
        dtype = cast(np.dtype, dtype)
        subarr = _sanitize_str_dtypes(subarr, data, dtype, copy)

    return subarr
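
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# Typical sanitize_array outputs: list input goes through
# maybe_convert_platform, and an empty list with no dtype defaults to float64
# to match NumPy:
#
# >>> sanitize_array([1, 2, 3], index=None)
# array([1, 2, 3])
# >>> sanitize_array([], index=None).dtype
# dtype('float64')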

def range_to_ndarray(rng: range) -> np.ndarray:
    """
    Cast a range object to ndarray.
    """
    # GH#30171 perf avoid realizing range as a list in np.array
    try:
        arr = np.arange(rng.start, rng.stop, rng.step, dtype="int64")
    except OverflowError:
        # GH#30173 handling for ranges that overflow int64
        if (rng.start >= 0 and rng.step > 0) or (rng.step < 0 <= rng.stop):
            try:
                arr = np.arange(rng.start, rng.stop, rng.step, dtype="uint64")
            except OverflowError:
                arr = construct_1d_object_array_from_listlike(list(rng))
        else:
            arr = construct_1d_object_array_from_listlike(list(rng))
    return arr
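
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# Ranges are converted without materializing a list; ranges that overflow both
# int64 and uint64 fall back to an object-dtype array:
#
# >>> range_to_ndarray(range(3))
# array([0, 1, 2])
# >>> range_to_ndarray(range(2**70, 2**70 + 2)).dtype
# dtype('O')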

def _sanitize_non_ordered(data) -> None:
    """
    Raise only for unordered sets, e.g., not for dict_keys
    """
    if isinstance(data, (set, frozenset)):
        raise TypeError(f"'{type(data).__name__}' type is unordered")
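
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# Only genuinely unordered containers are rejected; ordered views such as
# dict_keys pass through silently:
#
# >>> _sanitize_non_ordered({1, 2})
# Traceback (most recent call last):
# ...
# TypeError: 'set' type is unordered
# >>> _sanitize_non_ordered({"a": 1}.keys())  # returns None, no error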

def _sanitize_ndim(
    result: ArrayLike,
    data,
    dtype: DtypeObj | None,
    index: Index | None,
    *,
    allow_2d: bool = False,
) -> ArrayLike:
    """
    Ensure we have a 1-dimensional result array.
    """
    if getattr(result, "ndim", 0) == 0:
        raise ValueError("result should be arraylike with ndim > 0")

    if result.ndim == 1:
        # the result that we want
        result = _maybe_repeat(result, index)

    elif result.ndim > 1:
        if isinstance(data, np.ndarray):
            if allow_2d:
                return result
            raise ValueError(
                f"Data must be 1-dimensional, got ndarray of shape {data.shape} instead"
            )
        if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype):
            # i.e. NumpyEADtype("O")

            result = com.asarray_tuplesafe(data, dtype=np.dtype("object"))
            cls = dtype.construct_array_type()
            result = cls._from_sequence(result, dtype=dtype)
        else:
            # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type
            # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[str,
            # dtype[Any], None]"
            result = com.asarray_tuplesafe(data, dtype=dtype)  # type: ignore[arg-type]
    return result

def _sanitize_str_dtypes(
    result: np.ndarray, data, dtype: np.dtype | None, copy: bool
) -> np.ndarray:
    """
    Ensure we have a dtype that is supported by pandas.
    """

    # This is to prevent mixed-type Series getting all casted to
    # NumPy string type, e.g. NaN --> '-1#IND'.
    if issubclass(result.dtype.type, str):
        # GH#16605
        # If not empty convert the data to dtype
        # GH#19853: If data is a scalar, result has already the result
        if not lib.is_scalar(data):
            if not np.all(isna(data)):
                data = np.asarray(data, dtype=dtype)
            if not copy:
                result = np.asarray(data, dtype=object)
            else:
                result = np.array(data, dtype=object, copy=copy)
    return result
759 |
+
"""
|
760 |
+
If we have a length-1 array and an index describing how long we expect
|
761 |
+
the result to be, repeat the array.
|
762 |
+
"""
|
763 |
+
if index is not None:
|
764 |
+
if 1 == len(arr) != len(index):
|
765 |
+
arr = arr.repeat(len(index))
|
766 |
+
return arr
|
767 |
+
|
768 |
+
|
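
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# _maybe_repeat is what lets a length-1 array broadcast to the length of a
# supplied index:
#
# >>> _maybe_repeat(np.array([5]), pd.Index(range(3)))
# array([5, 5, 5])
# >>> _maybe_repeat(np.array([1, 2]), pd.Index(range(2)))
# array([1, 2])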

def _try_cast(
    arr: list | np.ndarray,
    dtype: np.dtype,
    copy: bool,
) -> ArrayLike:
    """
    Convert input to numpy ndarray and optionally cast to a given dtype.

    Parameters
    ----------
    arr : ndarray or list
        Excludes: ExtensionArray, Series, Index.
    dtype : np.dtype
    copy : bool
        If False, don't copy the data if not needed.

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    is_ndarray = isinstance(arr, np.ndarray)

    if dtype == object:
        if not is_ndarray:
            subarr = construct_1d_object_array_from_listlike(arr)
            return subarr
        return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy)

    elif dtype.kind == "U":
        # TODO: test cases with arr.dtype.kind in "mM"
        if is_ndarray:
            arr = cast(np.ndarray, arr)
            shape = arr.shape
            if arr.ndim > 1:
                arr = arr.ravel()
        else:
            shape = (len(arr),)
        return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(
            shape
        )

    elif dtype.kind in "mM":
        return maybe_cast_to_datetime(arr, dtype)

    # GH#15832: Check if we are requesting a numeric dtype and
    # that we can convert the data to the requested dtype.
    elif dtype.kind in "iu":
        # this will raise if we have e.g. floats

        subarr = maybe_cast_to_integer_array(arr, dtype)
    elif not copy:
        subarr = np.asarray(arr, dtype=dtype)
    else:
        subarr = np.array(arr, dtype=dtype, copy=copy)

    return subarr
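
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# _try_cast enforces the requested dtype strictly for integer targets: lossy
# float-to-int coercion raises rather than truncating silently:
#
# >>> _try_cast([1, 2], np.dtype("int64"), copy=False)
# array([1, 2])
# >>> _try_cast([1.5], np.dtype("int64"), copy=False)  # raises ValueError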
env-llmeval/lib/python3.10/site-packages/pandas/core/flags.py
ADDED
@@ -0,0 +1,117 @@
from __future__ import annotations

from typing import TYPE_CHECKING
import weakref

if TYPE_CHECKING:
    from pandas.core.generic import NDFrame


class Flags:
    """
    Flags that apply to pandas objects.

    Parameters
    ----------
    obj : Series or DataFrame
        The object these flags are associated with.
    allows_duplicate_labels : bool, default True
        Whether to allow duplicate labels in this object. By default,
        duplicate labels are permitted. Setting this to ``False`` will
        cause an :class:`errors.DuplicateLabelError` to be raised when
        `index` (or columns for DataFrame) is not unique, or any
        subsequent operation introduces duplicates.
        See :ref:`duplicates.disallow` for more.

    .. warning::

       This is an experimental feature. Currently, many methods fail to
       propagate the ``allows_duplicate_labels`` value. In future versions
       it is expected that every method taking or returning one or more
       DataFrame or Series objects will propagate ``allows_duplicate_labels``.

    Examples
    --------
    Attributes can be set in two ways:

    >>> df = pd.DataFrame()
    >>> df.flags
    <Flags(allows_duplicate_labels=True)>
    >>> df.flags.allows_duplicate_labels = False
    >>> df.flags
    <Flags(allows_duplicate_labels=False)>

    >>> df.flags['allows_duplicate_labels'] = True
    >>> df.flags
    <Flags(allows_duplicate_labels=True)>
    """

    _keys: set[str] = {"allows_duplicate_labels"}

    def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None:
        self._allows_duplicate_labels = allows_duplicate_labels
        self._obj = weakref.ref(obj)

    @property
    def allows_duplicate_labels(self) -> bool:
        """
        Whether this object allows duplicate labels.

        Setting ``allows_duplicate_labels=False`` ensures that the
        index (and columns of a DataFrame) are unique. Most methods
        that accept and return a Series or DataFrame will propagate
        the value of ``allows_duplicate_labels``.

        See :ref:`duplicates` for more.

        See Also
        --------
        DataFrame.attrs : Set global metadata on this object.
        DataFrame.set_flags : Set global flags on this object.

        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
        >>> df.flags.allows_duplicate_labels
        True
        >>> df.flags.allows_duplicate_labels = False
        Traceback (most recent call last):
            ...
        pandas.errors.DuplicateLabelError: Index has duplicates.
              positions
        label
        a        [0, 1]
        """
        return self._allows_duplicate_labels

    @allows_duplicate_labels.setter
    def allows_duplicate_labels(self, value: bool) -> None:
        value = bool(value)
        obj = self._obj()
        if obj is None:
            raise ValueError("This flag's object has been deleted.")

        if not value:
            for ax in obj.axes:
                ax._maybe_check_unique()

        self._allows_duplicate_labels = value

    def __getitem__(self, key: str):
        if key not in self._keys:
            raise KeyError(key)

        return getattr(self, key)

    def __setitem__(self, key: str, value) -> None:
        if key not in self._keys:
            raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
        setattr(self, key, value)

    def __repr__(self) -> str:
        return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"

    def __eq__(self, other) -> bool:
        if isinstance(other, type(self)):
            return self.allows_duplicate_labels == other.allows_duplicate_labels
        return False
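
# --- Editor's illustrative sketch (not part of the pandas source above) ---
# Flags is normally reached through ``DataFrame.flags`` / ``Series.flags``;
# ``set_flags`` returns a new object with the flag applied, and Flags equality
# compares only the flag values:
#
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [1, 2]})
# >>> df2 = df.set_flags(allows_duplicate_labels=False)
# >>> df2.flags
# <Flags(allows_duplicate_labels=False)>
# >>> df.flags == df2.flags
# False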
env-llmeval/lib/python3.10/site-packages/pandas/core/frame.py
ADDED
The diff for this file is too large to render. See raw diff.

env-llmeval/lib/python3.10/site-packages/pandas/core/generic.py
ADDED
The diff for this file is too large to render. See raw diff.
env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__init__.py
ADDED
@@ -0,0 +1,15 @@
from pandas.core.groupby.generic import (
    DataFrameGroupBy,
    NamedAgg,
    SeriesGroupBy,
)
from pandas.core.groupby.groupby import GroupBy
from pandas.core.groupby.grouper import Grouper

__all__ = [
    "DataFrameGroupBy",
    "NamedAgg",
    "SeriesGroupBy",
    "GroupBy",
    "Grouper",
]
env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (459 Bytes).

env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc
ADDED
Binary file (1.42 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc
ADDED
Binary file (2.3 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc
ADDED
Binary file (78.3 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc
ADDED
Binary file (159 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc
ADDED
Binary file (26.3 kB).

env-llmeval/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc
ADDED
Binary file (9.75 kB).